def __init__(self, indim, outdim, hiddim=6):
        self.network = RecurrentNetwork()

        # create modules
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim, peepholes=False)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        # add modules
        self.network.addInputModule(self._in_layer)
        self.network.addModule(self._hid_layer)
        self.network.addModule(self._bias)
        self.network.addOutputModule(self._out_layer)
        self._last_hidden_layer = None
        self._first_hidden_layer = None

        # create connections
        self._hid_to_out_connection = FullConnection(self._hid_layer, self._out_layer)
        self._in_to_hid_connection = FullConnection(self._in_layer, self._hid_layer)
        self._out_to_hid_connection = FullConnection(self._out_layer, self._hid_layer)

        # add connections
        self.network.addConnection(self._hid_to_out_connection)
        self.network.addConnection(self._in_to_hid_connection)
        self.network.addConnection(FullConnection(self._bias, self._hid_layer))
        self.network.addRecurrentConnection(self._out_to_hid_connection)
        self.network.sortModules()

        self.backprojectionFactor = 1
Example #2
    def __init__(self, indim, outdim, hiddim=6):
        Module.__init__(self, indim, outdim)

        self._network = RecurrentNetwork()
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        self._network.addInputModule(self._in_layer)
        self._network.addModule(self._hid_layer)
        self._network.addModule(self._bias)
        self._network.addOutputModule(self._out_layer)

        self._hid_to_out_connection = FullConnection(self._hid_layer, self._out_layer)
        self._in_to_hid_connection = FullConnection(self._in_layer, self._hid_layer)
        self._network.addConnection(self._hid_to_out_connection)
        self._network.addConnection(self._in_to_hid_connection)
        self._network.addConnection(FullConnection(self._bias, self._hid_layer))

        self._network.sortModules()

        self.offset = self._network.offset
        self.backprojectionFactor = 1.0

        return
Example #3
    def __init__(self, outdim, hiddim=15):
        """ Create an EvolinoNetwork with for sequences of dimension outdim and
        hiddim dimension of the RNN Layer."""
        indim = 0
        Module.__init__(self, indim, outdim)

        self._network = RecurrentNetwork()
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        self._network.addInputModule(self._in_layer)
        self._network.addModule(self._hid_layer)
        self._network.addModule(self._bias)
        self._network.addOutputModule(self._out_layer)

        self._in_to_hid_connection = FullConnection(self._in_layer,
                                                    self._hid_layer)
        self._bias_to_hid_connection = FullConnection(self._bias,
                                                      self._hid_layer)
        self._hid_to_out_connection = FullConnection(self._hid_layer,
                                                     self._out_layer)
        self._network.addConnection(self._in_to_hid_connection)
        self._network.addConnection(self._bias_to_hid_connection)
        self._network.addConnection(self._hid_to_out_connection)

        self._recurrent_connection = FullConnection(self._hid_layer,
                                                    self._hid_layer)
        self._network.addRecurrentConnection(self._recurrent_connection)

        self._network.sortModules()
        self._network.reset()

        self.offset = self._network.offset
        self.backprojectionFactor = 0.01
Example #5
class EvolinoNetwork(Module):
    """ Model class to be trained by the EvolinoTrainer."""

    def __init__(self, outdim, hiddim=15):
        """ Create an EvolinoNetwork with for sequences of dimension outdim and
        hiddim dimension of the RNN Layer."""
        indim = 0
        Module.__init__(self, indim, outdim)

        self._network = RecurrentNetwork()
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        self._network.addInputModule(self._in_layer)
        self._network.addModule(self._hid_layer)
        self._network.addModule(self._bias)
        self._network.addOutputModule(self._out_layer)

        self._in_to_hid_connection = FullConnection(self._in_layer,
                                                    self._hid_layer)
        self._bias_to_hid_connection = FullConnection(self._bias,
                                                      self._hid_layer)
        self._hid_to_out_connection = FullConnection(self._hid_layer,
                                                     self._out_layer)
        self._network.addConnection(self._in_to_hid_connection)
        self._network.addConnection(self._bias_to_hid_connection)
        self._network.addConnection(self._hid_to_out_connection)

        self._recurrent_connection = FullConnection(self._hid_layer,
                                                    self._hid_layer)
        self._network.addRecurrentConnection(self._recurrent_connection)

        self._network.sortModules()
        self._network.reset()

        self.offset = self._network.offset
        self.backprojectionFactor = 0.01

    def reset(self):
        """ Resets the underlying network """
        self._network.reset()

    def washout(self, sequence):
        """ Force the network to process the sequence instead of the
        backprojection values. Used for adjusting the RNN's state. Returns the
        outputs of the RNN that are needed for linear regression."""
        assert len(sequence) != 0
        assert self.outdim == len(sequence[0])

        raw_outputs = []
        for val in sequence:
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            self._activateNetwork(backprojection)
            raw_out = self._getRawOutput()
            raw_outputs.append(raw_out)
            self._setLastOutput(val)

        return array(raw_outputs)

    def _activateNetwork(self, input):
        """ Run the activate method of the underlying network."""
        assert len(input) == self._network.indim
        output = array(self._network.activate(input))
        self.offset = self._network.offset
        return output

    def activate(self, input):
        raise NotImplementedError(
            '.activate() is not supported, use .extrapolate()')

    def extrapolate(self, sequence, length):
        """ Extrapolate 'sequence' for 'length' steps and return the
        extrapolated sequence as array.

        Extrapolating is realized by resetting the network, then washing it out
        with the supplied sequence, and then generating a sequence."""
        self.reset()
        self.washout(sequence)
        return self.generate(length)

    def generate(self, length):
        """ Generate a sequence of specified length.

        Use .reset() and .washout() before."""
        generated_sequence = [] #empty(length)
        for _ in range(length):
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            out = self._activateNetwork(backprojection)
            generated_sequence.append(out)

        return array(generated_sequence)

    def _getLastOutput(self):
        """Return the current output of the linear output layer."""
        if self.offset == 0:
            return zeros(self.outdim)
        else:
            return self._out_layer.outputbuffer[self.offset - 1]

    def _setLastOutput(self, output):
        """Force the current output of the linear output layer to 'output'."""
        self._out_layer.outputbuffer[self.offset - 1][:] = output

    #
    # Genome related
    #

    def _validateGenomeLayer(self, layer):
        """Validate the type and state of a layer."""
        assert isinstance(layer, LSTMLayer)
        assert not layer.peepholes

    def getGenome(self):
        """Return the RNN's Genome."""
        return self._getGenomeOfLayer(self._hid_layer)

    def setGenome(self, weights):
        """Set the RNN's Genome."""
        weights = deepcopy(weights)
        self._setGenomeOfLayer(self._hid_layer, weights)

    def _getGenomeOfLayer(self, layer):
        """Return the genome of a single layer."""
        self._validateGenomeLayer(layer)

        connections = self._getInputConnectionsOfLayer(layer)

        layer_weights = []
        # iterate cells of layer
        for cell_idx in range(layer.outdim):
            # TODO: the Evolino paper uses a different order of weights for the genotype of an LSTM cell
            cell_weights = []
            # iterate weight types (ingate, forgetgate, cell and outgate)
            for t in range(4):
                # iterate connections
                for c in connections:
                    # iterate sources of connection
                    for i in range(c.indim):
                        idx = i + cell_idx * c.indim + t * layer.outdim * c.indim
                        cell_weights.append(c.params[idx])

            layer_weights.append(cell_weights)

        return layer_weights
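
    # A worked instance of the idx formula above: with layer.outdim == 2 hidden
    # cells and a single incoming FullConnection of indim == 3, the ingate
    # weight (t == 0) of cell 1 from source 0 lands at
    # idx = 0 + 1 * 3 + 0 * 2 * 3 == 3, while its outgate weight (t == 3) from
    # source 2 lands at idx = 2 + 1 * 3 + 3 * 2 * 3 == 23.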

    def _setGenomeOfLayer(self, layer, weights):
        """Set the genome of a single layer."""
        self._validateGenomeLayer(layer)

        connections = self._getInputConnectionsOfLayer(layer)

        # iterate cells of layer
        for cell_idx in range(layer.outdim):
            # TODO: the Evolino paper uses a different order of weights for the genotype of an LSTM cell
            cell_weights = weights[cell_idx]
            # iterate weight types (ingate, forgetgate, cell and outgate)
            for t in range(4):
                # iterate connections
                for c in connections:
                    # iterate sources of connection
                    for i in range(c.indim):
                        idx = i + cell_idx * c.indim + t * layer.outdim * c.indim
                        c.params[idx] = cell_weights.pop(0)

    #
    #  Linear Regression related
    #

    def setOutputWeightMatrix(self, W):
        """Set the weight matrix of the linear output layer."""
        c = self._hid_to_out_connection
        c.params[:] = W.flatten()

    def getOutputWeightMatrix(self):
        """Return the weight matrix of the linear output layer."""
        c = self._hid_to_out_connection
        p = c.params
        return reshape(p, (c.outdim, c.indim))

    def _getRawOutput(self):
        """Return the current output of the RNN. This is needed for linear
        regression, which calculates the weight matrix of the linear output
        layer."""
        return copy(self._hid_layer.outputbuffer[self.offset - 1])

    #
    # Topology Helper
    #

    def _getInputConnectionsOfLayer(self, layer):
        """Return a list of all input connections for the layer."""
        connections = []
        all_cons = list(self._network.recurrentConns)
        all_cons += sum(self._network.connections.values(), [])
        for c in all_cons:
            if c.outmod is layer:
                if not isinstance(c, FullConnection):
                    raise NotImplementedError(
                        "Only FullConnections are supported")
                connections.append(c)
        return connections
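
The class above is always driven through the same reset → washout → generate
cycle. A minimal usage sketch (hypothetical sine-wave data; note that the
hidden-to-output weights stay at their random initialization until
setOutputWeightMatrix() is called with the linear-regression result):

from numpy import array, linspace, sin

net = EvolinoNetwork(outdim=1, hiddim=15)
sequence = array([[sin(x)] for x in linspace(0, 6.0, 60)])
extrapolated = net.extrapolate(sequence[:50], length=10)  # reset + washout + generate
print(extrapolated.shape)  # (10, 1)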
Example #6
class EvolinoNetwork(Module):
    """ Model class to be trained by the EvolinoTrainer."""
    def __init__(self, outdim, hiddim=15):
        """ Create an EvolinoNetwork with for sequences of dimension outdim and
        hiddim dimension of the RNN Layer."""
        indim = 0
        Module.__init__(self, indim, outdim)

        self._network = RecurrentNetwork()
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        self._network.addInputModule(self._in_layer)
        self._network.addModule(self._hid_layer)
        self._network.addModule(self._bias)
        self._network.addOutputModule(self._out_layer)

        self._in_to_hid_connection = FullConnection(self._in_layer,
                                                    self._hid_layer)
        self._bias_to_hid_connection = FullConnection(self._bias,
                                                      self._hid_layer)
        self._hid_to_out_connection = FullConnection(self._hid_layer,
                                                     self._out_layer)
        self._network.addConnection(self._in_to_hid_connection)
        self._network.addConnection(self._bias_to_hid_connection)
        self._network.addConnection(self._hid_to_out_connection)

        self._recurrent_connection = FullConnection(self._hid_layer,
                                                    self._hid_layer)
        self._network.addRecurrentConnection(self._recurrent_connection)

        self._network.sortModules()
        self._network.reset()

        self.offset = self._network.offset
        self.backprojectionFactor = 0.01

    def reset(self):
        """ Resets the underlying network """
        self._network.reset()

    def washout(self, sequence):
        """ Force the network to process the sequence instead of the 
        backprojection values. Used for adjusting the RNN's state. Returns the 
        outputs of the RNN that are needed for linear regression."""
        assert len(sequence) != 0
        assert self.outdim == len(sequence[0])

        raw_outputs = []
        for val in sequence:
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            self._activateNetwork(backprojection)
            raw_out = self._getRawOutput()
            raw_outputs.append(raw_out)
            self._setLastOutput(val)

        return array(raw_outputs)

    def _activateNetwork(self, input):
        """ Run the activate method of the underlying network."""
        assert len(input) == self._network.indim
        output = array(self._network.activate(input))
        self.offset = self._network.offset
        return output

    def activate(self, input):
        raise NotImplementedError(
            '.activate() is not supported, use .extrapolate()')

    def extrapolate(self, sequence, length):
        """ Extrapolate 'sequence' for 'length' steps and return the 
        extrapolated sequence as array.
            
        Extrapolating is realized by reseting the network, then washing it out
        with the supplied  sequence, and then generating a sequence."""
        self.reset()
        self.washout(sequence)
        return self.generate(length)

    def generate(self, length):
        """ Generate a sequence of specified length. 
        
        Use .reset() and .washout() before."""
        generated_sequence = []  #empty(length)
        for _ in range(length):
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            out = self._activateNetwork(backprojection)
            generated_sequence.append(out)

        return array(generated_sequence)

    def _getLastOutput(self):
        """Return the current output of the linear output layer."""
        if self.offset == 0:
            return zeros(self.outdim)
        else:
            return self._out_layer.outputbuffer[self.offset - 1]

    def _setLastOutput(self, output):
        """Force the current output of the linear output layer to 'output'."""
        self._out_layer.outputbuffer[self.offset - 1][:] = output

    #
    # Genome related
    #

    def _validateGenomeLayer(self, layer):
        """Validate the type and state of a layer."""
        assert isinstance(layer, LSTMLayer)
        assert not layer.peepholes

    def getGenome(self):
        """Return the RNN's Genome."""
        return self._getGenomeOfLayer(self._hid_layer)

    def setGenome(self, weights):
        """Set the RNN's Genome."""
        weights = deepcopy(weights)
        self._setGenomeOfLayer(self._hid_layer, weights)

    def _getGenomeOfLayer(self, layer):
        """Return the genome of a single layer."""
        self._validateGenomeLayer(layer)

        connections = self._getInputConnectionsOfLayer(layer)

        layer_weights = []
        # iterate cells of layer
        for cell_idx in range(layer.outdim):
            # TODO: the Evolino paper uses a different order of weights for the genotype of an LSTM cell
            cell_weights = []
            # iterate weight types (ingate, forgetgate, cell and outgate)
            for t in range(4):
                # iterate connections
                for c in connections:
                    # iterate sources of connection
                    for i in range(c.indim):
                        idx = i + cell_idx * c.indim + t * layer.outdim * c.indim
                        cell_weights.append(c.params[idx])

            layer_weights.append(cell_weights)

        return layer_weights

    def _setGenomeOfLayer(self, layer, weights):
        """Set the genome of a single layer."""
        self._validateGenomeLayer(layer)

        connections = self._getInputConnectionsOfLayer(layer)

        # iterate cells of layer
        for cell_idx in range(layer.outdim):
            # TODO: the Evolino paper uses a different order of weights for the genotype of an LSTM cell
            cell_weights = weights[cell_idx]
            # iterate weight types (ingate, forgetgate, cell and outgate)
            for t in range(4):
                # iterate connections
                for c in connections:
                    # iterate sources of connection
                    for i in range(c.indim):
                        idx = i + cell_idx * c.indim + t * layer.outdim * c.indim
                        c.params[idx] = cell_weights.pop(0)

    #
    #  Linear Regression related
    #

    def setOutputWeightMatrix(self, W):
        """Set the weight matrix of the linear output layer."""
        c = self._hid_to_out_connection
        c.params[:] = W.flatten()

    def getOutputWeightMatrix(self):
        """Return the weight matrix of the linear output layer."""
        c = self._hid_to_out_connection
        p = c.params
        return reshape(p, (c.outdim, c.indim))

    def _getRawOutput(self):
        """Return the current output of the RNN. This is needed for linear
        regression, which calculates the weight matrix of the linear output 
        layer."""
        return copy(self._hid_layer.outputbuffer[self.offset - 1])

    #
    # Topology Helper
    #

    def _getInputConnectionsOfLayer(self, layer):
        """Return a list of all input connections for the layer."""
        connections = []
        all_cons = list(self._network.recurrentConns)
        all_cons += sum(list(self._network.connections.values()), [])
        for c in all_cons:
            if c.outmod is layer:
                if not isinstance(c, FullConnection):
                    raise NotImplementedError(
                        "Only FullConnections are supported")
                connections.append(c)
        return connections
Example #7
def printXorResults(identifier, net):
    # hypothetical helper name; prints the net's response to the four XOR input patterns
    print(identifier, net.activate((0, 0)), net.activate((0, 1)),
          net.activate((1, 0)), net.activate((1, 1)))


ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

for input, target in ds:
    print(input, target)

# define the network
net = RecurrentNetwork()
net.addInputModule(LinearLayer(2, name="il"))
net.addModule(SigmoidLayer(4, name="h1"))
net.addModule(SigmoidLayer(4, name="h2"))
net.addOutputModule(LinearLayer(1, name="ol"))
c1 = FullConnection(net["il"], net["h1"])
c2 = FullConnection(net["h1"], net["h2"])
c3 = FullConnection(net["h2"], net["ol"])
cr1 = FullConnection(net["h1"], net["h1"])
net.addConnection(c1)
net.addConnection(c2)
net.addConnection(c3)
net.addRecurrentConnection(cr1)
net.sortModules()

print(net)
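
The snippet builds the XOR dataset and recurrent network but stops before
training. A minimal continuation, assuming pybrain's BackpropTrainer and the
printXorResults helper from the top of this example (plain backprop here; it
does not do temporal credit assignment through the recurrent connection):

from pybrain.supervised.trainers import BackpropTrainer

trainer = BackpropTrainer(net, ds, learningrate=0.01)
for _ in range(1000):
    trainer.train()  # one pass over the four XOR samples
printXorResults("after training", net)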
Example #8
def buildSimpleLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(1, name='i')
    h = LSTMLayer(1, peepholes=peepholes, name='lstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, name='f1'))
    N.addConnection(FullConnection(b, h, name='f2'))
    N.addRecurrentConnection(FullConnection(h, h, name='r1'))
    N.addConnection(FullConnection(h, o, name='f3'))
    N.sortModules()
    return N
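
A quick smoke test of the builder: because the network is recurrent, repeated
activate() calls advance the LSTM state, so identical inputs need not produce
identical outputs (a sketch, with made-up input values):

net = buildSimpleLSTMNetwork()
net.reset()
for _ in range(3):
    print(net.activate([0.5]))  # output drifts as the LSTM cell state evolves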
Example #9
def mk_nn(mx_lag):
    
    #Construct the Neural Network
    #n = FeedForwardNetwork()
    n = RecurrentNetwork()
    n.addInputModule(LinearLayer(mx_lag, name='in'))
    n.addModule(BiasUnit('bias'))
    n.addModule(LSTMLayer(5, name='hidden1'))
    n.addModule(LSTMLayer(5, name='hidden2'))
    #n.addModule(TanhLayer(10, name='hidden2'))
    #n.addModule(TanhLayer(10, name='hidden3'))
    n.addOutputModule(LinearLayer(1, name='out'))
    
    #add connections
    n.addConnection(FullConnection(n['in'], n['hidden1'], name='c1'))
    n.addConnection(FullConnection(n['hidden1'], n['hidden2'], name='c2'))
    n.addConnection(FullConnection(n['bias'], n['hidden1'], name='c3'))
    #n.addConnection(FullConnection(n['hidden2'], n['hidden3'], name='c5'))
    #n.addRecurrentConnection(FullConnection(n['hidden1'], n['hidden1']))
    n.addConnection(FullConnection(n['hidden2'], n['out'], name='c4'))
    #n.addConnection(FullConnection(n['hidden1'], n['out'], name='c2'))
    n.sortModules()
    return n
Example #10
def buildSimpleMDLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleMDLstmNet')
    i = LinearLayer(1, name='i')
    dim = 1
    h = MDLSTMLayer(dim, peepholes=peepholes, name='MDlstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, outSliceTo=4 * dim, name='f1'))
    N.addConnection(FullConnection(b, h, outSliceTo=4 * dim, name='f2'))
    N.addRecurrentConnection(FullConnection(h, h, inSliceTo=dim, outSliceTo=4 * dim, name='r1'))
    N.addRecurrentConnection(IdentityConnection(h, h, inSliceFrom=dim, outSliceFrom=4 * dim, name='rstate'))
    N.addConnection(FullConnection(h, o, inSliceTo=dim, name='f3'))
    N.sortModules()
    return N
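
The slice keywords encode the MDLSTM buffer layout: the first 4*dim input
slots are the gate and cell inputs (hence outSliceTo=4*dim on the feeding
connections), and the remaining slots carry the previous cell state, which the
IdentityConnection copies back unchanged. A small sketch to inspect this,
under that assumed layout:

net = buildSimpleMDLSTMNetwork()
mdlstm = net['MDlstm']
# expected: 5*dim inputs (4*dim gates/cell + dim state) and
# 2*dim outputs (dim output + dim state)
print(mdlstm.indim, mdlstm.outdim)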
Example #11
File: gfnn.py  Project: andyr0id/PyGFNN
    def _randomizePhase(self, c):
        theta0 = np.random.randn(self.outdim * self.indim)
        theta0 = np.exp(1j * 2 * np.pi * theta0)
        return c * reshape(theta0, (self.outdim, self.indim))

    def _forwardImplementation(self, inbuf, outbuf):
        outbuf += inbuf

    def _backwardImplementation(self, outerr, inerr, inbuf):
        #CHECKME: not setting derivatives -- this means the multiplicative weight is never updated!
        inerr += 0

if __name__ == "__main__":
    # from pybrain.tests import runModuleTestSuite
    # import pygfnn.tests.unittests.structure.connections.test_gfnn_connections as test
    # runModuleTestSuite(test)
    from pybrain.structure.networks.recurrent import RecurrentNetwork
    from pybrain import LinearLayer, FullConnection, FullNotSelfConnection, IdentityConnection
    from pygfnn import GFNNLayer, RealIdentityConnection, RealMeanFieldConnection
    N = RecurrentNetwork('simpleGFNN')
    i = LinearLayer(1, name = 'i')
    h = GFNNLayer(200, name = 'gfnn')
    o = LinearLayer(200, name = 'o')
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(GFNNExtConnection(i, h, name = 'f1'))
    N.addRecurrentConnection(GFNNIntConnection(h, h, name = 'r1'))
    N.addConnection(RealIdentityConnection(h, o, name = 'i1'))
    N.sortModules()
Example #12
def buildSimpleLSTMNetwork(peepholes=False):
    N = RecurrentNetwork("simpleLstmNet")
    i = LinearLayer(1, name="i")
    h = LSTMLayer(1, peepholes=peepholes, name="lstm")
    o = LinearLayer(1, name="o")
    b = BiasUnit("bias")
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, name="f1"))
    N.addConnection(FullConnection(b, h, name="f2"))
    N.addRecurrentConnection(FullConnection(h, h, name="r1"))
    N.addConnection(FullConnection(h, o, name="r1"))
    N.sortModules()
    return N
Example #13
    def buildLSTMNetwork(self):
        # create network and modules
        net = RecurrentNetwork()
        inp = LinearLayer(self.n_input, name="Input")
        h1 = LSTMLayer(3, name='LSTM')
        h2 = SigmoidLayer(10, name='sigm')
        outp = LinearLayer(self.numActions, name='output')
        # add modules
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(h1)
        net.addModule(h2)
        # create connections from input
        net.addConnection(FullConnection(inp, h1, name="input_LSTM"))
        net.addConnection(FullConnection(inp, h2, name="input_sigm"))
        # create connections from LSTM
        net.addConnection(FullConnection(h1, h2, name="LSTM_sigm"))

        # add whichever recurrent connections
        net.addRecurrentConnection(FullConnection(h1, h1, name='LSTM_rec'))
        net.addRecurrentConnection(FullConnection(h2, h1,
                                                  name='sigm_LSTM_rec'))
        # create connections to output
        net.addConnection(FullConnection(h1, outp, name="LSTM_outp"))
        net.addConnection(FullConnection(h2, outp, name="sigm_outp"))

        # finish up
        net.sortModules()
        net.randomize()
        self.printModules(net)
        self.e = [0 for param in range(len(net.params))]
        # for each action, need to accumulate the gradient
        self.accumulated_gradients = [[0 for param in range(len(net.params))]
                                      for i in range(self.numActions)]
        return net
Example #14
def buildSimpleMDLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleMDLstmNet')
    i = LinearLayer(1, name='i')
    dim = 1
    h = MDLSTMLayer(dim, peepholes=peepholes, name='MDlstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, outSliceTo=4 * dim, name='f1'))
    N.addConnection(FullConnection(b, h, outSliceTo=4 * dim, name='f2'))
    N.addRecurrentConnection(
        FullConnection(h, h, inSliceTo=dim, outSliceTo=4 * dim, name='r1'))
    N.addRecurrentConnection(
        IdentityConnection(h,
                           h,
                           inSliceFrom=dim,
                           outSliceFrom=4 * dim,
                           name='rstate'))
    N.addConnection(FullConnection(h, o, inSliceTo=dim, name='f3'))
    N.sortModules()
    return N
Example #15
class FinancialNetwork(object):
    def __init__(self, indim, outdim, hiddim=6):
        self.network = RecurrentNetwork()

        # create modules
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim, peepholes=False)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        # add modules
        self.network.addInputModule(self._in_layer)
        self.network.addModule(self._hid_layer)
        self.network.addModule(self._bias)
        self.network.addOutputModule(self._out_layer)
        self._last_hidden_layer = None
        self._first_hidden_layer = None

        # create connections
        self._hid_to_out_connection = FullConnection(self._hid_layer, self._out_layer)
        self._in_to_hid_connection = FullConnection(self._in_layer, self._hid_layer)
        self._out_to_hid_connection = FullConnection(self._out_layer, self._hid_layer)

        # add connections
        self.network.addConnection(self._hid_to_out_connection)
        self.network.addConnection(self._in_to_hid_connection)
        self.network.addConnection(FullConnection(self._bias, self._hid_layer))
        self.network.addRecurrentConnection(self._out_to_hid_connection)
        self.network.sortModules()

        self.backprojectionFactor = 1



    def getGenome(self):
        weights = []
        for layer in self.getHiddenLayers():
            if isinstance(layer, LSTMLayer):
#                 if layer is not self._recurrence_layer:
                weights += self._getGenomeOfLayer(layer)
        return weights

    def setGenome(self, weights):
        """ Sets the Genome of the network.
            See class description for more details.
        """
        weights = deepcopy(weights)
        for layer in self.getHiddenLayers():
            if isinstance(layer, LSTMLayer):
#               if layer is not self._recurrence_layer:
                self._setGenomeOfLayer(layer, weights)

    def _setGenomeOfLayer(self, layer, weights):

        dim = layer.outdim

        connections = self._getInputConnectionsOfLayer(layer)

        for cell_idx in range(dim):
            cell_weights = weights.pop(0)
            for c in connections:
                params = c.params
                params[cell_idx + 0 * dim] = cell_weights.pop(0)
                params[cell_idx + 1 * dim] = cell_weights.pop(0)
                params[cell_idx + 2 * dim] = cell_weights.pop(0)
                params[cell_idx + 3 * dim] = cell_weights.pop(0)
            assert not len(cell_weights)

    def _getGenomeOfLayer(self, layer):

        dim = layer.outdim
        layer_weights = []

        connections = self._getInputConnectionsOfLayer(layer)

        for cell_idx in range(dim):
            # TODO: the Evolino paper uses a different order of weights for the genotype of an LSTM cell
            cell_weights = []
            for c in connections:
                cell_weights += [
                    c.params[ cell_idx + 0 * dim ],
                    c.params[ cell_idx + 1 * dim ],
                    c.params[ cell_idx + 2 * dim ],
                    c.params[ cell_idx + 3 * dim ] ]

            layer_weights.append(cell_weights)
        return layer_weights

    def _getInputConnectionsOfLayer(self, layer):
        """ Returns a list of all input connections for the layer. """
        connections = []
        for c in sum(list(self.network.connections.values()), []):
            if c.outmod is layer:
                if not isinstance(c, FullConnection):
                    raise NotImplementedError("At the time there is only support for FullConnection")
                connections.append(c)
        return connections


    def getHiddenLayers(self):
        """ Returns a list of all hidden layers. """
        layers = []
        network = self.network
        for m in network.modules:
            if m not in network.inmodules and m not in network.outmodules:
                layers.append(m)
        return layers


    def _getLastOutput(self):
        if self.network.offset == 0:
            return zeros(self.network.outdim)
        else:
            return self._out_layer.outputbuffer[self.network.offset - 1]

    def _setLastOutput(self, output):
        self._out_layer.outputbuffer[self.network.offset - 1][:] = output


    def washout(self, input):
        self.network.offset = 0
        lstmvalues = []
        for val in input:
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            full_input = append(val, backprojection)  # avoid rebinding the 'input' parameter mid-loop
            output = self.network.activate(full_input)
            self._setLastOutput(output)
            lstmvalues.append(self._hid_layer.outputbuffer[self.network.offset - 1])

        return lstmvalues

    def reset(self):
        self.network.reset()


    def getOutputWeightMatrix(self):
        c = self._hid_to_out_connection
        W = c.params
        return reshape(W, (c.outdim, c.indim))

    def setOutputWeightMatrix(self, W):
        c = self._hid_to_out_connection
        p = c.params
        p[:] = W.flatten()

    def activate(self, input):
        outputs = []
        for val in input:
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            inputtrain = append(val, backprojection)
            output = self.network.activate(inputtrain)
            self._setLastOutput(output)
            outputs.append(self._out_layer.outputbuffer[self.network.offset - 1])

        return outputs
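
A hedged end-to-end sketch of the backprojection loop above (hypothetical
sine-wave data; as in the Evolino examples, the output weights remain random
until setOutputWeightMatrix() is called):

import numpy as np

fn = FinancialNetwork(indim=1, outdim=1)
series = np.sin(np.linspace(0, 6.0, 50)).reshape(-1, 1)
fn.reset()
hidden = fn.washout(series)       # pass over the series, collecting LSTM activations for regression
preds = fn.activate(series[:10])  # free-running: feeds its own last output back in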
Example #16
def buildSimpleLSTMNetwork(peepholes=False):
    N = RecurrentNetwork('simpleLstmNet')
    i = LinearLayer(1, name='i')
    h = LSTMLayer(1, peepholes=peepholes, name='lstm')
    o = LinearLayer(1, name='o')
    b = BiasUnit('bias')
    N.addModule(b)
    N.addOutputModule(o)
    N.addInputModule(i)
    N.addModule(h)
    N.addConnection(FullConnection(i, h, name='f1'))
    N.addConnection(FullConnection(b, h, name='f2'))
    N.addRecurrentConnection(FullConnection(h, h, name='r1'))
    N.addConnection(FullConnection(h, o, name='f3'))
    N.sortModules()
    return N
Example #17
class EvolinoNetwork(Module):
    def __init__(self, indim, outdim, hiddim=6):
        Module.__init__(self, indim, outdim)

        self._network = RecurrentNetwork()
        self._in_layer = LinearLayer(indim + outdim)
        self._hid_layer = LSTMLayer(hiddim)
        self._out_layer = LinearLayer(outdim)
        self._bias = BiasUnit()

        self._network.addInputModule(self._in_layer)
        self._network.addModule(self._hid_layer)
        self._network.addModule(self._bias)
        self._network.addOutputModule(self._out_layer)

        self._hid_to_out_connection = FullConnection(self._hid_layer, self._out_layer)
        self._in_to_hid_connection = FullConnection(self._in_layer, self._hid_layer)
        self._network.addConnection(self._hid_to_out_connection)
        self._network.addConnection(self._in_to_hid_connection)
        self._network.addConnection(FullConnection(self._bias, self._hid_layer))

        self._network.sortModules()

        self.offset = self._network.offset
        self.backprojectionFactor = 1.0

        return

    def reset(self):
        self._network.reset()
        return

    def washout(self, inputs, targets, first_idx=None, last_idx=None):
        assert self.indim == len(inputs[0])
        assert self.outdim == len(targets[0])
        assert len(inputs) == len(targets)

        if first_idx is None:
            first_idx = 0

        if last_idx is None:
            last_idx = len(targets) - 1

        raw_outputs = []

        for i in range(first_idx, last_idx + 1):
            backprojection = self._getLastOutput()
            backprojection *= self.backprojectionFactor
            full_inp = self._createFullInput(inputs[i], backprojection)
            self._activateNetwork(full_inp)
            raw_out = self._getRawOutput()
            raw_outputs.append(np.array(raw_out))
            self._setLastOutput(targets[i])

        return np.array(raw_outputs)

    def extrapolate(self, inputs, targets, length):
        """ Extrapolate 'sequence' for 'length' steps and return the
        extrapolated sequence as array.

        Extrapolating is realized by reseting the network, then washing it out
        with the supplied  sequence, and then generating a sequence.
        """
        testing_idx = len(targets)
        self.reset()

        self.washout(inputs[:testing_idx], targets)
        outputs = self.generate(inputs[testing_idx:], length)

        return outputs

    def generate(self, inputs, length):
        generated_sequence = []

        for i in range(length):
            output = self.activate(inputs[i])
            generated_sequence.append(output)

        return np.array(generated_sequence)

    def _activateNetwork(self, input):
        assert len(input) == self._network.indim
        output = self._network.activate(input)

        self.offset = self._network.offset

        return output

    def activate(self, input):
        assert len(input) == self.indim

        backprojection = self._getLastOutput()
        backprojection *= self.backprojectionFactor
        full_inp = self._createFullInput(input, backprojection)
        out = self._activateNetwork(full_inp)
        self._setLastOutput(out)

        return out

    def calculateOutput(self, dataset, washout_ratio):
        # iterate through all sequences
        collected_input = None
        collected_output = None
        collected_target = None

        for i in range(dataset.getNumSequences()):
            seq = dataset.getSequence(i)
            input = seq[0]
            target = seq[1]

            washout_steps = int(len(input) * washout_ratio)

            washout_input = input[:washout_steps]
            washout_target = target[:washout_steps]
            calculation_target = target[washout_steps:]

            # reset
            self.reset()

            # washout
            self.washout(washout_input, washout_target)

            # collect calculation data
            outputs = []
            inputs = []

            for inp in input[washout_steps:]:
                out = self.activate(inp)
                inputs.append(inp)
                outputs.append(out)

            # collect output and targets
            if collected_input is not None:
                collected_input = np.append(collected_input, inputs, axis=0)
            else:
                collected_input = np.array(inputs)

            if collected_output is not None:
                collected_output = np.append(collected_output, outputs, axis=0)
            else:
                collected_output = np.array(outputs)

            if collected_target is not None:
                collected_target = np.append(collected_target, calculation_target, axis=0)
            else:
                collected_target = calculation_target

        return collected_input, collected_output, collected_target

    def _createFullInput(self, input, output):
        if self.indim > 0:
            return np.append(input, output)

        return np.array(output)

    def _getLastOutput(self):
        if self.offset == 0:
            return np.zeros(self.outdim)

        #return self._out_layer.outputbuffer[self.offset - 1]
        return self._network.outputbuffer[self.offset - 1]

    def _setLastOutput(self, output):
        #self._out_layer.outputbuffer[self.offset - 1][:] = output
        self._network.outputbuffer[self.offset - 1][:] = output

        return

    # ======================================================== Genome related ===
    def _validateGenomeLayer(self, layer):
        """ Validates the type and state of a layer
        """
        assert isinstance(layer, LSTMLayer)
        assert not layer.peepholes
        return

    def getGenome(self):
        """ Returns the Genome of the network.
            See class description for more details.
        """
        return self._getGenomeOfLayer(self._hid_layer)

    def setGenome(self, weights):
        """ Sets the Genome of the network.
            See class description for more details.
        """
        weights = deepcopy(weights)
        self._setGenomeOfLayer(self._hid_layer, weights)

        return

    def _getGenomeOfLayer(self, layer):
        """ Returns the genome of a single layer.
        """
        self._validateGenomeLayer(layer)

        dim = layer.outdim
        layer_weights = []

        connections = self._getInputConnectionsOfLayer(layer)

        for cell_idx in range(dim):
            # TODO: the Evolino paper uses a different order of weights for the genotype of an LSTM cell
            cell_weights = []
            for c in connections:
                cell_weights += [
                    c.params[ cell_idx + 0 * dim ],
                    c.params[ cell_idx + 1 * dim ],
                    c.params[ cell_idx + 2 * dim ],
                    c.params[ cell_idx + 3 * dim ] ]

            layer_weights.append(cell_weights)

        return layer_weights

    def _setGenomeOfLayer(self, layer, weights):
        """ Sets the genome of a single layer.
        """
        self._validateGenomeLayer(layer)

        dim = layer.outdim

        connections = self._getInputConnectionsOfLayer(layer)

        for cell_idx in range(dim):
            cell_weights = weights.pop(0)
            for c in connections:
                params = c.params
                params[cell_idx + 0 * dim] = cell_weights.pop(0)
                params[cell_idx + 1 * dim] = cell_weights.pop(0)
                params[cell_idx + 2 * dim] = cell_weights.pop(0)
                params[cell_idx + 3 * dim] = cell_weights.pop(0)

            assert not len(cell_weights)

        return

    # ============================================ Linear Regression related ===
    def setOutputWeightMatrix(self, W):
        """ Sets the weight matrix of the output layer's input connection.
        """
        c = self._hid_to_out_connection
        c.params[:] = W.flatten()

        return

    def getOutputWeightMatrix(self):
        """ Returns the weight matrix of the output layer's input connection.
        """
        c = self._hid_to_out_connection
        #p = c.getParameters()
        p = c.params[:]

        return np.reshape(p, (c.outdim, c.indim))

    def _getRawOutput(self):
        """ Returns the current output of the last hidden layer.
            This is needed for linear regression, which calculates
            the weight matrix W of the full connection between this layer
            and the output layer.
        """
        return copy(self._hid_layer.outputbuffer[self.offset - 1])

    # ====================================================== Topology Helper ===
    def _getInputConnectionsOfLayer(self, layer):
        """ Returns a list of all input connections for the layer. """
        connections = []

        for c in sum(self._network.connections.values(), []):
            if c.outmod is layer:
                if not isinstance(c, FullConnection):
                    raise NotImplementedError("At the time there is only support for FullConnection")

                connections.append(c)

        return connections
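
A short usage sketch for this variant, which, unlike Example #5, takes
separate inputs and targets (hypothetical random data; np is numpy, as
imported by the source module):

inputs = np.random.randn(60, 2)
targets = np.random.randn(60, 1)
net = EvolinoNetwork(indim=2, outdim=1, hiddim=6)
net.reset()
raw = net.washout(inputs[:50], targets[:50])    # (50, hiddim) LSTM activations for regression
future = net.generate(inputs[50:], length=10)   # free-running continuation of shape (10, 1)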