# Example #1 (score: 0)
 def setInputScaling(self, newInputScaling):
     """Change the input scaling of a fitted reservoir in place.

     Rescales the input weight matrix ``_WInput`` so that it reflects the
     new scaling instead of the old one, then stores the new scaling.

     Args:
         newInputScaling : Scalar or per-input-dimension array of the new
             input scaling factors.
     """
     # Broadcast a scalar to a per-input vector (no-op for an array input).
     newScaling = B.ones(self.n_input) * newInputScaling

     # The expanded scaling carries a leading 1.0 for the bias column of
     # _WInput, matching how it is built in __init__.
     oldExpanded = self._expandedInputScaling
     self._expandedInputScaling = B.vstack(
         (B.array(1.0), newScaling.reshape(-1, 1))).flatten()

     # Undo the old scaling and apply the new one; the bias entry is 1.0
     # in both, so the bias column is unchanged.
     # (Previously this divided the (n_input+1)-long expanded scaling by the
     # n_input-long old scaling — a shape mismatch — and never used
     # newInputScaling at all.)
     self._WInput = self._WInput * (self._expandedInputScaling / oldExpanded)

     self._inputScaling = newScaling
# Example #2 (score: 0)
    def propagateOneStep(self, inputData, outputData, step, transientTime=0, verbose=0, steps="auto", learn=False):
        """Propagate the ESN by exactly one time step and read out the output.

        Args:
            inputData : Input sample for this step.
            outputData : Target output for this step (used only when ``learn`` is True).
            step : Index of the current step (kept for interface compatibility; unused).
            transientTime : Kept for interface compatibility; unused for a single step.
            verbose : Kept for interface compatibility; a progress bar is
                meaningless for a single step. (The previous implementation
                referenced the undefined names ``inputLength`` and ``t`` here,
                raising NameError whenever verbose > 0.)
            steps : Kept for interface compatibility; unused.
            learn : If True, update the output weights online toward the
                least-squares solution for this step.

        Returns:
            Tuple of (extended state vector ``X``, output ``Y``).
        """
        x = self._x

        # Advance the reservoir state with the new input.
        u, x = self.update(inputData=inputData, x=x)
        self._x = x

        # Extended state: [output bias; scaled input; reservoir state].
        self._X = B.vstack((B.array(self._outputBias), self._outputInputScaling * u, x))

        # Linear readout from the extended state.
        Y = B.dot(self._WOut, self._X)

        if learn:
            # Exponential-moving-average update of WOut toward the one-step
            # least-squares solution.
            rate = 0.1  # learning rate; TODO(review): consider exposing as a parameter
            y_target = self.out_inverse_activation(outputData).T
            # Solve for WOut from the target activation and the current state.
            wout = B.dot(y_target.reshape(self.n_output, 1), B.pinv(self._X))
            self._WOut = rate * wout + (1 - rate) * self._WOut

        return self._X, Y
# Example #3 (score: 0)
    def update(self, inputData, outputData=None, x=None):
        """Advance the reservoir state by one time step (in place).

        Applies the leaky-integrator update
        ``x <- (1 - leakingRate) * x + leakingRate * activation(pre-activation + noise)``
        where the pre-activation optionally includes an output-feedback term.

        Args:
            inputData : Input sample; may be None only when ``n_input == 0``
                and feedback is enabled.
            outputData : Previous output, required when feedback is enabled.
            x : State vector to update in place; defaults to ``self._x``.

        Returns:
            The reshaped input column ``u`` (an empty ``(0, 1)`` array when
            the network has no input).
        """
        if x is None:
            x = self._x

        leak = self._leakingRate
        # Single uniform noise draw per step, centred at zero.
        noise = (np.random.rand() - 0.5) * self._noiseLevel

        if self._WFeedback is None:
            # No feedback: plain input + recurrent transmission.
            u = inputData.reshape(self.n_input, 1)
            preActivation = self.calculateLinearNetworkTransmissions(u, x) + noise
            x *= 1.0 - leak
            x += leak * self._activation(preActivation)
            return u

        # Feedback enabled: fold the previous output back into the state.
        fed = outputData.reshape(self.n_output, 1)

        if self.n_input != 0:
            u = inputData.reshape(self.n_input, 1)
            linearPart = self.calculateLinearNetworkTransmissions(u, x)
            result = u
        else:
            # The input is allowed to be "empty" (size = 0): only the
            # reservoir recurrence contributes.
            linearPart = B.dot(self._W, x)
            result = B.empty((0, 1))

        feedbackTerm = B.dot(
            self._WFeedback,
            B.vstack((B.array(self._outputBias), fed)),
        )
        x *= 1.0 - leak
        x += leak * self._activation(linearPart + feedbackTerm + noise)
        return result
# Example #4 (score: 0)
    def __init__(self, n_input, n_reservoir, n_output,
                 spectralRadius=1.0, noiseLevel=0.01, inputScaling=None,
                 leakingRate=1.0, feedbackScaling = 1.0, reservoirDensity=0.2, randomSeed=None,
                 out_activation=lambda x: x, out_inverse_activation=lambda x: x,
                 weightGeneration='naive', bias=1.0, outputBias=1.0, outputInputScaling=1.0,
                 feedback=False, inputDensity=1.0, activation = B.tanh, activationDerivation=lambda x: 1.0/B.cosh(x)**2):
        """Set up an echo state network (ESN).

        Args:
            n_input : Dimensionality of the input.
            n_reservoir : Number of units in the reservoir.
            n_output : Dimensionality of the output.
            spectralRadius : Spectral radius of the reservoir weight matrix.
            noiseLevel : Magnitude of noise added in the state update.
            inputScaling : Scalar or per-dimension scaling of the input.
            leakingRate : Convex combination factor (0..1) between current and new state.
            feedbackScaling : Rescaling factor of the output-to-input feedback.
            reservoirDensity : Fraction of non-zero reservoir connections.
            randomSeed : Seed for random weight initialization.
            out_activation : Activation function of the output.
            out_inverse_activation : Inverse of the output activation.
            weightGeneration : Weight matrix generation algorithm (e.g. 'naive').
            bias : Bias added in the internal update.
            outputBias : Bias added for the output regression.
            outputInputScaling : Rescaling of the input for the regression.
            feedback : Whether to include output-to-input feedback.
            inputDensity : Fraction of non-zero input-to-reservoir weights.
            activation : (Non-linear) activation function.
            activationDerivation : Derivative of the activation function.

        Raises:
            ValueError: If a non-scalar inputScaling does not match n_input.
        """

        self.n_input = n_input
        self.n_reservoir = n_reservoir
        self.n_output = n_output

        self._spectralRadius = spectralRadius
        self._noiseLevel = noiseLevel
        self._reservoirDensity = reservoirDensity
        self._leakingRate = leakingRate
        self._feedbackScaling = feedbackScaling
        self.inputDensity = inputDensity
        self._activation = activation
        self._activationDerivation = activationDerivation
        self._inputScaling = inputScaling

        # Normalize inputScaling to a length-n_input array.
        if self._inputScaling is None:
            self._inputScaling = 1.0
        if np.isscalar(self._inputScaling):
            self._inputScaling = B.ones(n_input) * self._inputScaling
        else:
            if len(self._inputScaling) != self.n_input:
                raise ValueError("Dimension of inputScaling ({0}) does not match the input data dimension ({1})".format(len(self._inputScaling), n_input))
            # Convert to a backend array so .reshape below works even when a
            # plain Python list was passed. (Previously the raw argument was
            # reassigned here, which broke .reshape for list inputs.)
            self._inputScaling = B.array(inputScaling)

        # Expanded scaling has a leading 1.0 for the bias column of WInput.
        self._expandedInputScaling = B.vstack((B.array(1.0), self._inputScaling.reshape(-1,1))).flatten()

        self.out_activation = out_activation
        self.out_inverse_activation = out_inverse_activation

        if randomSeed is not None:
            rnd.seed(randomSeed)

        self._bias = bias
        self._outputBias = outputBias
        self._outputInputScaling = outputInputScaling
        self._createReservoir(weightGeneration, feedback)
# Example #5 (score: 0)
    def calculateLinearNetworkTransmissions(self, u, x=None):
        """Compute the linear pre-activation of the reservoir.

        Args:
            u : Input column vector for the current step.
            x : Reservoir state; defaults to ``self._x``.

        Returns:
            Sum of the input contribution (bias-augmented input through
            ``_WInput``) and the recurrent contribution (``_W`` applied to x).
        """
        if x is None:
            x = self._x

        biasedInput = B.vstack((B.array(self._bias), u))
        inputPart = B.dot(self._WInput, biasedInput)
        recurrentPart = B.dot(self._W, x)
        return inputPart + recurrentPart
# Example #6 (score: 0)
    def propagate(self, inputData, outputData=None, transientTime=0, verbose=0, x=None, steps="auto", feedbackData=None):
        """Propagate a sequence through the reservoir, collecting states and outputs.

        Args:
            inputData : Input sequence, or None when driving purely by feedback.
            outputData : Target sequence for teacher forcing, or None to
                generate predictions with the trained readout.
            transientTime : Number of initial steps to discard from X and Y.
            verbose : If > 0, show a progress bar.
            x : Initial reservoir state; defaults to ``self._x`` (updated in place).
            steps : Number of steps when both inputData and outputData are None.
            feedbackData : Initial feedback value; defaults to zeros.

        Returns:
            Tuple (X, Y): the collected extended states and the outputs.
            Y is None when no output was computed (feedback with teacher
            forcing and no readout applied).

        Raises:
            ValueError: If the number of steps cannot be determined.
        """
        if x is None:
            x = self._x

        # Derive the number of steps from whichever sequence is available.
        inputLength = steps
        if inputData is None:
            if outputData is not None:
                inputLength = len(outputData)
        else:
            inputLength = len(inputData)
        if inputLength == "auto":
            raise ValueError("inputData and outputData are both None. Therefore, steps must not be `auto`.")

        # States' matrix: one column per non-transient step.
        X = B.zeros((1 + self.n_input + self.n_reservoir, inputLength - transientTime))
        # Y stays None unless an output is actually produced below; previously
        # returning it unassigned raised NameError in some branches.
        Y = None

        if (verbose > 0):
            bar = progressbar.ProgressBar(max_value=inputLength, redirect_stdout=True, poll_interval=0.0001)
            bar.update(0)

        if self._WFeedback is None:
            # Feedback disabled: the input must not be None.
            for t in range(inputLength):
                u, x = self.update(inputData[t], x=x)
                if (t >= transientTime):
                    # Add the extended state to the states' matrix.
                    X[:, t - transientTime] = B.vstack((B.array(self._outputBias), self._outputInputScaling * u, x))[:, 0]
                if (verbose > 0):
                    bar.update(t)
            # Readout computed once after the loop (it was recomputed on
            # every step before, doing O(steps) redundant matrix products).
            Y = B.dot(self._WOut, X)
        else:
            if outputData is None:
                Y = B.empty((inputLength - transientTime, self.n_output))

            if feedbackData is None:
                feedbackData = B.zeros((1, self.n_output))

            if inputData is None:
                # Output-feedback only (no external input).
                for t in range(inputLength):
                    self.update(None, feedbackData, x=x)
                    if (t >= transientTime):
                        # Add the extended state to the states' matrix.
                        X[:, t - transientTime] = B.vstack((B.array(self._outputBias), x))[:, 0]
                    if outputData is None:
                        # Predict with the trained model to generate feedback.
                        if (self._solver in ["sklearn_auto", "sklearn_lsqr", "sklearn_sag", "sklearn_svd"]):
                            feedbackData = self._ridgeSolver.predict(B.vstack((B.array(self._outputBias), self._x)).T)
                        else:
                            feedbackData = B.dot(self._WOut, B.vstack((B.array(self._outputBias), self._x)))
                        if t >= transientTime:
                            Y[t - transientTime, :] = feedbackData
                    else:
                        # Teacher forcing: feed the known output back.
                        feedbackData = outputData[t]

                    if (verbose > 0):
                        bar.update(t)
            else:
                # External input plus output feedback.
                for t in range(inputLength):
                    u = self.update(inputData[t], feedbackData, x=x)
                    if (t >= transientTime):
                        # Add the extended state to the states' matrix.
                        X[:, t - transientTime] = B.vstack((B.array(self._outputBias), self._outputInputScaling * u, x))[:, 0]
                    if outputData is None:
                        # Predict with the trained model to generate feedback.
                        if (self._solver in ["sklearn_auto", "sklearn_lsqr", "sklearn_sag", "sklearn_svd"]):
                            feedbackData = self._ridgeSolver.predict(B.vstack((B.array(self._outputBias), self._outputInputScaling * u, self._x)).T)
                        else:
                            feedbackData = B.dot(self._WOut, B.vstack((B.array(self._outputBias), self._outputInputScaling * u, self._x)))
                        # Store only past the transient; the index was `t`
                        # before, which overflows Y when transientTime > 0.
                        if t >= transientTime:
                            Y[t - transientTime, :] = feedbackData.ravel()
                    else:
                        # Teacher forcing: feed the known output back.
                        feedbackData = outputData[t]

                    if (verbose > 0):
                        bar.update(t)

        if (verbose > 0):
            bar.finish()

        return X, Y
# Example #7 (score: 0)
    def __init__(
        self,
        n_input,
        n_reservoir,
        n_output,
        spectralRadius=1.0,
        noiseLevel=0.01,
        inputScaling=None,
        leakingRate=1.0,
        feedbackScaling=1.0,
        reservoirDensity=0.2,
        randomSeed=None,
        out_activation=lambda x: x,
        out_inverse_activation=lambda x: x,
        weightGeneration="naive",
        bias=1.0,
        outputBias=1.0,
        outputInputScaling=1.0,
        feedback=False,
        inputDensity=1.0,
        activation=B.tanh,
        activationDerivative=lambda x: 1.0 / B.cosh(x)**2,
    ):
        """Implementation of a ESN.

        Args:
            n_input : Dimensionality of the input.
            n_reservoir : Number of units in the reservoir.
            n_output : Dimensionality of the output.
            spectralRadius : Spectral radius of the reservoir's connection/weight matrix.
            noiseLevel : Magnitude of noise that is added to the input while fitting to prevent overfitting.
            inputScaling : Scaling factor of the input.
            leakingRate : Convex combination factor between 0 and 1 that weights current and new state value.
            feedbackScaling : Rescaling factor of the output-to-input feedback in the update process.
            reservoirDensity : Percentage of non-zero weight connections in the reservoir.
            randomSeed : Seed for random processes, e.g. weight initialization.
            out_activation : Final activation function (i.e. activation function of the output).
            out_inverse_activation : Inverse of the final activation function
            weightGeneration : Algorithm to generate weight matrices. Choices: naive, SORM, advanced, custom
            bias : Size of the bias added for the internal update process.
            outputBias : Size of the bias added for the final linear regression of the output.
            outputInputScaling : Rescaling factor for the input of the ESN for the regression.
            feedback : Include output-input feedback in the ESN.
            inputDensity : Percentage of non-zero weights in the input-to-reservoir weight matrix.
            activation : (Non-linear) Activation function.
            activationDerivative : Derivative of the activation function.

        Raises:
            ValueError: If a non-scalar inputScaling does not match n_input.
        """

        self.n_input = n_input
        self.n_reservoir = n_reservoir
        self.n_output = n_output

        self._spectralRadius = spectralRadius
        self._noiseLevel = noiseLevel
        self._reservoirDensity = reservoirDensity
        self._leakingRate = leakingRate
        self._feedbackScaling = feedbackScaling
        self.inputDensity = inputDensity
        self._activation = activation
        self._activationDerivative = activationDerivative
        self._inputScaling = inputScaling

        # Normalize inputScaling to a length-n_input array.
        if self._inputScaling is None:
            self._inputScaling = 1.0
        if np.isscalar(self._inputScaling):
            self._inputScaling = B.ones(n_input) * self._inputScaling
        else:
            if len(self._inputScaling) != self.n_input:
                raise ValueError(
                    "Dimension of inputScaling ({0}) does not match the input data dimension ({1})"
                    .format(len(self._inputScaling), n_input))
            # Convert to a backend array so .reshape below works even when a
            # plain Python list was passed. (Previously the raw argument was
            # reassigned here, which broke .reshape for list inputs.)
            self._inputScaling = B.array(inputScaling)

        # Expanded scaling has a leading 1.0 for the bias column of WInput.
        self._expandedInputScaling = B.vstack(
            (B.array(1.0), self._inputScaling.reshape(-1, 1))).flatten()

        self.out_activation = out_activation
        self.out_inverse_activation = out_inverse_activation

        if randomSeed is not None:
            B.seed(randomSeed)
            np.random.seed(randomSeed)

        self._bias = bias
        self._outputBias = outputBias
        self._outputInputScaling = outputInputScaling
        self._createReservoir(weightGeneration, feedback)