Example #1
    def setInputScaling(self, newInputScaling):
        # expand the new scaling factor to one value per input dimension
        newInputScaling = B.ones(self.n_input) * newInputScaling
        newExpandedInputScaling = B.vstack(
            (B.array(1.0), newInputScaling.reshape(-1, 1))).flatten()

        # rescale the existing input weights by the ratio of new to old scaling
        self._WInput = self._WInput * (newExpandedInputScaling /
                                       self._expandedInputScaling)

        self._inputScaling = newInputScaling
        self._expandedInputScaling = newExpandedInputScaling
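
This works because the input weights are linear in the scaling factors: multiplying each weight column by the ratio of new to old scaling is equivalent to drawing the raw weights once and applying the new scaling directly. A minimal standalone NumPy sketch of that identity (sizes and values here are illustrative, not taken from the library):

import numpy as np

# hypothetical standalone check of the rescaling identity used above
rng = np.random.default_rng(0)
n_reservoir, n_input = 4, 3
raw = rng.uniform(-0.5, 0.5, (n_reservoir, 1 + n_input))

old = np.concatenate(([1.0], np.full(n_input, 0.5)))  # [bias, old scaling]
new = np.concatenate(([1.0], np.full(n_input, 2.0)))  # [bias, new scaling]

W_old = raw * old
W_new = W_old * (new / old)
assert np.allclose(W_new, raw * new)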
Example #2
    def propagateOneStep(self, inputData, outputData, step, transientTime=0, verbose=0, steps="auto", learn=False):
        # progress reporting for single steps is handled by the caller (e.g. fit_loop)

        x = self._x

        # update the reservoir state with the current input
        u, x = self.update(inputData=inputData, x=x)
        self._x = x

        # assemble the extended state vector [output bias; scaled input; reservoir state]
        self._X = B.vstack((B.array(self._outputBias), self._outputInputScaling*u, x))

        # calculate the output
        Y = B.dot(self._WOut, self._X)

        if learn:
            # learning rate for the incremental readout update
            rate = 0.1
            # calculate the target activation
            y_target = self.out_inverse_activation(outputData).T
            # solve for WOut using the target activation and the current state
            wout = B.dot(y_target.reshape(self.n_output, 1), B.pinv(self._X))
            # blend the one-step solution into the existing output weights
            self._WOut = rate*wout + (1 - rate)*self._WOut

        return self._X, Y
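
With learn=True this performs a crude online update: the least-squares readout for the single current state is obtained via the pseudo-inverse and blended into the running weights with a fixed rate. A toy NumPy sketch of that blending rule, assuming random states and targets:

import numpy as np

# toy illustration of the blended readout update: per step,
# W_step = y @ pinv(X) is the one-sample least-squares fit,
# and W_out moves a fraction `rate` towards it
rng = np.random.default_rng(1)
n_state, n_output, rate = 5, 2, 0.1
W_out = np.zeros((n_output, n_state))

for _ in range(100):
    X = rng.normal(size=(n_state, 1))   # extended state column
    y = rng.normal(size=(n_output, 1))  # target activation
    W_step = y @ np.linalg.pinv(X)
    W_out = rate * W_step + (1 - rate) * W_out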
Example #3
def autocorrelation(self, x):
    # unbiased sample autocorrelation of a 1D series for all lags 0..n-1
    n = x.shape[0]
    variance = B.var(x)
    x = x - B.mean(x)
    # full cross-correlation of the centered series with itself;
    # the last n entries correspond to the non-negative lags
    r = B.correlate(x, x, mode="full")[-n:]
    assert B.allclose(
        r, B.array([(x[:n - k] * x[-(n - k):]).sum() for k in range(n)]))
    # normalize by the variance and the number of overlapping samples per lag
    result = r / (variance * (B.arange(n, 0, -1)))
    return result
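
The normalization divides each lag by the series variance and by the number of overlapping samples at that lag, so lag 0 normalizes to exactly 1. The same computation in plain NumPy, independent of the backend B, as a sanity check:

import numpy as np

# same computation with plain NumPy; lag 0 must normalize to 1
x = np.sin(np.linspace(0, 8 * np.pi, 200))
n = x.shape[0]
x = x - x.mean()
r = np.correlate(x, x, mode="full")[-n:]
acf = r / (x.var() * np.arange(n, 0, -1))
assert np.isclose(acf[0], 1.0)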
Example #4
    def update(self, inputData, outputData=None, x=None):
        if x is None:
            x = self._x

        if self._WFeedback is None:
            # reshape the data
            u = inputData.reshape(self.n_input, 1)

            # update the states with the leaky-integrator rule
            transmission = self.calculateLinearNetworkTransmissions(u, x)
            x *= 1.0 - self._leakingRate
            x += self._leakingRate * self._activation(
                transmission + (np.random.rand() - 0.5) * self._noiseLevel)

            return u, x

        else:
            # the input is allowed to be "empty" (size=0)
            if self.n_input != 0:
                # reshape the data
                u = inputData.reshape(self.n_input, 1)
                outputData = outputData.reshape(self.n_output, 1)

                # update the states, feeding the previous output back in
                transmission = self.calculateLinearNetworkTransmissions(u, x)
                x *= 1.0 - self._leakingRate
                x += self._leakingRate * self._activation(transmission + B.dot(
                    self._WFeedback,
                    B.vstack((B.array(self._outputBias), outputData)),
                ) + (np.random.rand() - 0.5) * self._noiseLevel)

                return u, x
            else:
                # reshape the data
                outputData = outputData.reshape(self.n_output, 1)
                # update the states from the recurrent and feedback weights only
                transmission = B.dot(self._W, x)
                x *= 1.0 - self._leakingRate
                x += self._leakingRate * self._activation(transmission + B.dot(
                    self._WFeedback,
                    B.vstack((B.array(self._outputBias), outputData)),
                ) + (np.random.rand() - 0.5) * self._noiseLevel)

                return B.empty((0, 1)), x
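
All branches apply the same leaky-integrator rule, x <- (1 - a)*x + a*f(transmission + noise); they differ only in which terms contribute to the transmission. A minimal NumPy sketch of the no-feedback branch with made-up sizes:

import numpy as np

# minimal leaky-integrator step: x <- (1 - a) x + a tanh(W_in [1; u] + W x + noise)
rng = np.random.default_rng(2)
n_reservoir, n_input, a, noise = 10, 2, 0.3, 0.01
W_in = rng.uniform(-0.5, 0.5, (n_reservoir, 1 + n_input))
W = rng.uniform(-0.5, 0.5, (n_reservoir, n_reservoir))
x = np.zeros((n_reservoir, 1))

u = rng.normal(size=(n_input, 1))
transmission = W_in @ np.vstack(([[1.0]], u)) + W @ x
x = (1 - a) * x + a * np.tanh(transmission + (rng.random() - 0.5) * noise)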
Example #5
    def predict(
        self,
        inputData,
        continuation=True,
        initialData=None,
        update_processor=lambda x: x,
        verbose=0,
    ):
        inputData = B.array(inputData)

        # let some input run through the ESN to initialize its states from a new starting value
        if not continuation:
            self._x = B.zeros(self._x.shape)

            if initialData is not None:
                if self._WFeedback is None:
                    for t in range(initialData.shape[0]):
                        super(PredictionESN, self).update(initialData[t])
                else:
                    if type(initialData) is tuple:
                        initialDataInput, initialDataOutput = initialData
                        if len(initialDataInput) != len(initialDataOutput):
                            raise ValueError(
                                "Length of the inputData and the outputData of the initialData tuple do not match."
                            )
                    else:
                        raise ValueError(
                            "initialData has to be a tuple consisting out of the input and the output data."
                        )

                    for t in range(initialDataInput.shape[0]):
                        super(PredictionESN, self).update(initialDataInput[t],
                                                          initialDataOutput[t])

        X = self.propagate(inputData, verbose=verbose)

        if self._WFeedback is not None:
            X, _ = X

        # calculate the prediction using the trained model
        if self._solver in [
                "sklearn_auto",
                "sklearn_lsqr",
                "sklearn_sag",
                "sklearn_svd",
                "sklearn_svr",
        ]:
            Y = self._ridgeSolver.predict(X.T).reshape((self.n_output, -1))
        else:
            Y = B.dot(self._WOut, X)

        # apply the output activation function
        Y = update_processor(self.out_activation(Y))

        # return the result
        return Y.T
    def predict_loop(self,
                     inputData,
                     outputData1,
                     outputData2,
                     continuation=True,
                     initialData=None,
                     update_processor=lambda x: x,
                     verbose=0):
        inputData = B.array(inputData)

        #let some input run through the ESN to initialize its states from a new starting value
        if (not continuation):
            self._esn1._x = B.zeros(self._esn1._x.shape)
            self._esn2._x = B.zeros(self._esn2._x.shape)

        total_length = inputData.shape[0]
        aggregated_y1 = B.empty((total_length, self._n_output1))
        aggregated_y2 = B.empty((total_length, self._n_output2))

        # X1, y1 = self._esn1.propagate(inputData=inputData, outputData=None, transientTime=self._transientTime, verbose=verbose-1)
        # Y1 = B.dot(self._esn1._WOut, X1)
        # Y1 = update_processor(self.out_activation(Y1)).T

        # feedback from the second ESN's output at its last state; it stays
        # constant here because the second-layer update below is commented out
        y2 = self.out_activation(B.dot(self._esn2._WOut, self._esn2._X).T)

        for i in range(total_length):

            inputDataWithFeedback = B.zeros((self.n_input + self._n_output2))
            inputDataWithFeedback[:self.n_input] = inputData[i, :]
            inputDataWithFeedback[self.n_input:] = y2

            #update models
            X1, _ = self._esn1.propagateOneStep(
                inputData=inputDataWithFeedback,
                outputData=None,
                step=i,
                transientTime=self._transientTime,
                verbose=verbose - 1,
                learn=False)
            y1 = B.dot(self._esn1._WOut, X1)
            aggregated_y1[i, :] = update_processor(self.out_activation(y1)).T

            #output from 1st layer and correct output
            #input2 = outputData1[i] - y1.reshape(self._n_output1)
            # input2 = B.vstack((y1.reshape(self._n_output1), outputData1[i]))
            # x2, y2 = self._esn2.propagateOneStep(inputData=input2, outputData=None, step=i, transientTime=self._transientTime, verbose=verbose-1, learn=False)
            # Y_target2[i,:]  = y2.reshape(self._n_output2)

        prediction_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))

        print("prediction error")
        print(prediction_error1)

        # note: aggregated_y2 is returned unfilled, as the second layer is not evaluated here
        return aggregated_y1, aggregated_y2
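
The loop above wires the two reservoirs together by appending the second model's output to the raw input at every step. A toy sketch of just that wiring (sizes are illustrative):

import numpy as np

# per-step input assembly: [raw input | feedback from second model]
n_input, n_output2, steps = 3, 1, 5
rng = np.random.default_rng(3)
inputs = rng.normal(size=(steps, n_input))
y2 = np.zeros(n_output2)  # feedback, constant here as in the loop above

for i in range(steps):
    step_input = np.concatenate((inputs[i], y2))
    assert step_input.shape == (n_input + n_output2,)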
Example #7
    def __init__(self, n_input, n_reservoir, n_output,
                 spectralRadius=1.0, noiseLevel=0.01, inputScaling=None,
                 leakingRate=1.0, feedbackScaling=1.0, reservoirDensity=0.2, randomSeed=None,
                 out_activation=lambda x: x, out_inverse_activation=lambda x: x,
                 weightGeneration='naive', bias=1.0, outputBias=1.0, outputInputScaling=1.0,
                 feedback=False, inputDensity=1.0, activation=B.tanh, activationDerivation=lambda x: 1.0/B.cosh(x)**2):

        self.n_input = n_input
        self.n_reservoir = n_reservoir
        self.n_output = n_output

        self._spectralRadius = spectralRadius
        self._noiseLevel = noiseLevel
        self._reservoirDensity = reservoirDensity
        self._leakingRate = leakingRate
        self._feedbackScaling = feedbackScaling
        self.inputDensity = inputDensity
        self._activation = activation
        self._activationDerivation = activationDerivation
        self._inputScaling = inputScaling
        
        # note: the state vector self._x (shape (1 + n_reservoir, 1)) is
        # initialized when the reservoir is created/reset

        if self._inputScaling is None:
            self._inputScaling = 1.0
        if np.isscalar(self._inputScaling):
            self._inputScaling = B.ones(n_input) * self._inputScaling
        else:
            if len(self._inputScaling) != self.n_input:
                raise ValueError("Dimension of inputScaling ({0}) does not match the input data dimension ({1})".format(len(self._inputScaling), n_input))
            self._inputScaling = B.array(inputScaling)

        self._expandedInputScaling = B.vstack((B.array(1.0), self._inputScaling.reshape(-1,1))).flatten()

        self.out_activation = out_activation
        self.out_inverse_activation = out_inverse_activation

        if randomSeed is not None:
            rnd.seed(randomSeed)

        self._bias = bias
        self._outputBias = outputBias
        self._outputInputScaling = outputInputScaling
        self._createReservoir(weightGeneration, feedback)
    def predict(self,
                inputData,
                outputData1,
                outputData2,
                continuation=True,
                initialData=None,
                update_processor=lambda x: x,
                verbose=0):
        inputData = B.array(inputData)

        #let some input run through the ESN to initialize its states from a new starting value
        if (not continuation):
            self._esn1._x = B.zeros(self._esn1._x.shape)
            self._esn2._x = B.zeros(self._esn2._x.shape)

        total_length = inputData.shape[0]
        aggregated_y1 = B.empty((total_length, self._n_output1))
        aggregated_y2 = B.empty((total_length, self._n_output2))

        #put pixels and switch data together
        inputDataWithFeedback = B.zeros(
            (total_length, self.n_input + self._n_output2))
        inputDataWithFeedback[:, :self.n_input] = inputData
        inputDataWithFeedback[:, self.n_input:] = outputData2

        X1, _ = self._esn1.propagate(inputData=inputDataWithFeedback,
                                     outputData=None,
                                     transientTime=self._transientTime,
                                     verbose=verbose - 1)
        aggregated_y1 = B.dot(self._esn1._WOut, X1)
        aggregated_y1 = update_processor(self.out_activation(aggregated_y1)).T

        prediction_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))

        print("prediction error")
        print(prediction_error1)

        # note: aggregated_y2 is returned unfilled, as the second layer is not evaluated here
        return aggregated_y1, aggregated_y2
    def fit_loop(self,
                 inputData,
                 outputData1,
                 outputData2,
                 transientTime="AutoReduce",
                 transientTimeCalculationEpsilon=1e-3,
                 transientTimeCalculationLength=20,
                 verbose=0):
        #check the input data
        if self.n_input != 0:
            if len(inputData.shape) == 3 and len(outputData1.shape) > 1:
                #multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output datasets is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
                if inputData.shape[1] != outputData1.shape[1]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[1], outputData1.shape[1]))
            else:
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
        else:
            if inputData is not None:
                raise ValueError(
                    "n_input has been set to zero. Therefore, the given inputData will not be used."
                )

        inputData = B.array(inputData)
        outputData1 = B.array(outputData1)
        outputData2 = B.array(outputData2)
        self._transientTime = transientTime

        self._esn1.resetState()
        self._esn2.resetState()

        total_length = inputData.shape[0]
        print("total_length ", total_length)

        if (verbose > 0):
            bar = progressbar.ProgressBar(max_value=total_length,
                                          redirect_stdout=True,
                                          poll_interval=0.0001)
            bar.update(0)

        # per-step outputs collected over the whole training run
        aggregated_y1 = B.empty(outputData1.shape)
        aggregated_y2 = B.empty(outputData2.shape)

        # X1, _ = self._esn1.propagate(inputData=inputData, outputData=outputData1, transientTime=transientTime, verbose=verbose-1)
        # self._esn1._X = X1

        # Y_target = self.out_inverse_activation(outputData1).T
        # self._esn1._WOut = B.dot(Y_target, B.pinv(X1))
        # y1 = self.out_activation((B.dot(self._esn1._WOut, X1)).T)
        # training_error1 = B.sqrt(B.mean((y1.reshape((total_length, self._n_output1))- outputData1)**2))
        # training_error2 = 0

        # feedback from the second ESN's output at its last state; it stays
        # constant here because the second-layer update below is commented out
        y2 = self.out_activation(B.dot(self._esn2._WOut, self._esn2._X).T)

        for i in range(total_length):

            #input: input + output from layer 2
            inputDataWithFeedback = B.zeros((self.n_input + self._n_output2))
            inputDataWithFeedback[:self.n_input] = inputData[i, :]
            inputDataWithFeedback[self.n_input:] = y2

            #update models
            X1, untrained_y1 = self._esn1.propagateOneStep(
                inputData=inputDataWithFeedback,
                outputData=outputData1[i],
                step=i,
                transientTime=transientTime,
                verbose=verbose - 1,
                learn=True)
            # record the output produced before this step's weight update
            aggregated_y1[i, :] = untrained_y1.reshape(self._n_output1)

            #output from 1st layer and correct output
            # input2 = B.vstack((y1.reshape(self._n_output1), outputData1[i]))
            # x2, y2 = self._esn2.propagateOneStep(inputData=input2, outputData=outputData2[i], step=i, transientTime=transientTime, verbose=verbose-1, learn=True)
            # Y_target2[i,:]  = y2.reshape(self._n_output2)

            if (verbose > 0):
                bar.update(i)
        if (verbose > 0):
            bar.finish()

        self._training_res = aggregated_y1
        training_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))
        # aggregated_y2 is never filled while the second layer is disabled,
        # so training_error2 is not meaningful here
        training_error2 = B.sqrt(B.mean((aggregated_y2 - outputData2)**2))

        print("training errors")
        print(training_error1)
        print(training_error2)

        return training_error1, training_error2
    def fit(self,
            inputData,
            outputData1,
            outputData2,
            transientTime="AutoReduce",
            transientTimeCalculationEpsilon=1e-3,
            transientTimeCalculationLength=20,
            verbose=0):
        #check the input data
        if self.n_input != 0:
            if len(inputData.shape) == 3 and len(outputData1.shape) > 1:
                #multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output datasets is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
                if inputData.shape[1] != outputData1.shape[1]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[1], outputData1.shape[1]))
            else:
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
        else:
            if inputData is not None:
                raise ValueError(
                    "n_input has been set to zero. Therefore, the given inputData will not be used."
                )

        inputData = B.array(inputData)
        outputData1 = B.array(outputData1)
        outputData2 = B.array(outputData2)
        self._transientTime = transientTime

        self._esn1.resetState()
        self._esn2.resetState()

        total_length = inputData.shape[0]
        print("total_length ", total_length)

        aggregated_y1 = B.empty(outputData1.shape)
        aggregated_y2 = B.empty(outputData2.shape)

        #put pixels and switch data together
        inputDataWithFeedback = B.zeros(
            (total_length, self.n_input + self._n_output2))
        inputDataWithFeedback[:, :self.n_input] = inputData
        inputDataWithFeedback[:, self.n_input:] = outputData2

        X1, _ = self._esn1.propagate(inputData=inputDataWithFeedback,
                                     outputData=outputData1,
                                     transientTime=transientTime,
                                     verbose=verbose - 1)
        self._esn1._X = X1

        Y_target = self.out_inverse_activation(outputData1).T
        self._esn1._WOut = B.dot(Y_target, B.pinv(X1))
        aggregated_y1 = self.out_activation((B.dot(self._esn1._WOut, X1)).T)
        training_error1 = B.sqrt(
            B.mean((aggregated_y1.reshape(
                (total_length, self._n_output1)) - outputData1)**2))
        # the second layer is not trained here, so its error is reported as zero
        training_error2 = 0

        return training_error1, training_error2
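
The batch fit reduces to a single pseudo-inverse solve: with the collected extended states X1 and the inverse-activated targets, the readout is WOut = Y_target * pinv(X1). A standalone NumPy sketch with random data:

import numpy as np

# batch readout solve: least-squares fit of W_out over all collected states
rng = np.random.default_rng(4)
n_state, n_output, T = 6, 2, 50
X = rng.normal(size=(n_state, T))          # extended states, one column per step
Y_target = rng.normal(size=(n_output, T))  # inverse-activated targets
W_out = Y_target @ np.linalg.pinv(X)
assert W_out.shape == (n_output, n_state)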
Example #11
    def calculateLinearNetworkTransmissions(self, u, x=None):
        if x is None:
            x = self._x

        # linear pre-activation: input weights applied to [bias; u] plus the recurrent contribution
        return B.dot(self._WInput, B.vstack((B.array(self._bias), u))) + B.dot(self._W, x)
Example #12
    def propagate(self, inputData, outputData=None, transientTime=0, verbose=0, x=None, steps="auto", feedbackData=None):
        if x is None:
            x = self._x

        inputLength = steps
        if inputData is None:
            if outputData is not None: 
                inputLength = len(outputData)
        else:
            inputLength = len(inputData)
        if inputLength == "auto":
            raise ValueError("inputData and outputData are both None. Therefore, steps must not be `auto`.")

        # define states' matrix
        X = B.zeros((1 + self.n_input + self.n_reservoir, inputLength - transientTime))

        if (verbose > 0):
            bar = progressbar.ProgressBar(max_value=inputLength, redirect_stdout=True, poll_interval=0.0001)
            bar.update(0)

        if self._WFeedback is None:
            # feedback is disabled here, so inputData must not be None

            for t in range(inputLength):
                u, x = self.update(inputData[t], x=x)
                if (t >= transientTime):
                    #add valueset to the states' matrix
                    X[:,t-transientTime] = B.vstack((B.array(self._outputBias), self._outputInputScaling*u, x))[:,0]
                if (verbose > 0):
                    bar.update(t)

            # calculate the output once from all collected states
            Y = B.dot(self._WOut, X)
        else:
            if outputData is None:
                Y = B.empty((inputLength-transientTime, self.n_output))
            else:
                # teacher forcing: outputData is fed back, no prediction is produced here
                Y = None

            if feedbackData is None:
                feedbackData = B.zeros((1, self.n_output))

            if inputData is None:
                for t in range(inputLength):
                    self.update(None, feedbackData, x=x)
                    if (t >= transientTime):
                        #add valueset to the states' matrix
                        X[:,t-transientTime] = B.vstack((B.array(self._outputBias), x))[:,0]
                    if outputData is None:
                        #calculate the prediction using the trained model
                        if (self._solver in ["sklearn_auto", "sklearn_lsqr", "sklearn_sag", "sklearn_svd"]):
                            feedbackData = self._ridgeSolver.predict(B.vstack((B.array(self._outputBias), self._x)).T)
                        else:
                            feedbackData = B.dot(self._WOut, B.vstack((B.array(self._outputBias), self._x)))
                        if t >= transientTime:
                            Y[t-transientTime, :] = feedbackData
                    else:
                        feedbackData = outputData[t]
                
                    if (verbose > 0):
                        bar.update(t)
            else:
                for t in range(inputLength):
                    u, x = self.update(inputData[t], feedbackData, x=x)
                    if (t >= transientTime):
                        #add valueset to the states' matrix
                        X[:,t-transientTime] = B.vstack((B.array(self._outputBias), self._outputInputScaling*u, x))[:,0]
                    if outputData is None:
                        #calculate the prediction using the trained model
                        if (self._solver in ["sklearn_auto", "sklearn_lsqr", "sklearn_sag", "sklearn_svd"]):
                            feedbackData = self._ridgeSolver.predict(B.vstack((B.array(self._outputBias), self._outputInputScaling*u, self._x)).T)
                        else:
                            feedbackData = B.dot(self._WOut, B.vstack((B.array(self._outputBias), self._outputInputScaling*u, self._x)))
                        if t >= transientTime:
                            Y[t-transientTime, :] = feedbackData.ravel()
                    else:
                        feedbackData = outputData[t]
                
                    if (verbose > 0):
                        bar.update(t)
                                 
        if (verbose > 0):
            bar.finish()

        return X, Y
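
The states' matrix built here always has one row for the output bias, one per (scaled) input dimension and one per reservoir unit, while the first transientTime steps are never written. A shape-only sketch of that layout:

import numpy as np

# layout of the states' matrix X filled by propagate
n_input, n_reservoir, T, transient = 2, 10, 100, 10
X = np.zeros((1 + n_input + n_reservoir, T - transient))
# column t - transient holds [outputBias, scaled u_t, x_t]
assert X.shape == (13, 90)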
Example #13
    def _createReservoir(self,
                         weightGeneration,
                         feedback=False,
                         verbose=False):
        # naive generation of the matrix W by using random weights
        if weightGeneration == "naive":
            # random weight matrix from -0.5 to 0.5
            self._W = B.array(B.rand(self.n_reservoir, self.n_reservoir) - 0.5)

            # set sparseness% to zero
            mask = B.rand(self.n_reservoir,
                          self.n_reservoir) > self._reservoirDensity
            self._W[mask] = 0.0

            _W_eigenvalues = B.abs(B.eigenval(self._W)[0])
            self._W *= self._spectralRadius / B.max(_W_eigenvalues)

        # generation using the SORM technique (see http://ftp.math.uni-rostock.de/pub/preprint/2012/pre12_01.pdf)
        elif weightGeneration == "SORM":
            self._W = B.identity(self.n_reservoir)

            number_nonzero_elements = (self._reservoirDensity *
                                       self.n_reservoir * self.n_reservoir)

            while B.count_nonzero(self._W) < number_nonzero_elements:
                Q = self.create_random_rotation_matrix()
                self._W = Q.dot(self._W)

            self._W *= self._spectralRadius

        # generation using the proposed method of Yildiz
        elif weightGeneration == "advanced":
            # to create W we must follow some steps:
            # at first, create a W = |W|
            # make it sparse
            # then scale its spectral radius to rho(W) = 1 (according to Yildiz with x(n+1) = (1-a)*x(n)+a*f(...))
            # then change randomly the signs of the matrix

            # random weight matrix from 0 to 0.5

            self._W = B.array(B.rand(self.n_reservoir, self.n_reservoir) / 2)

            # set sparseness% to zero
            mask = B.rand(self.n_reservoir,
                          self.n_reservoir) > self._reservoirDensity
            self._W[mask] = 0.0

            _W_eigenvalue = B.max(B.abs(B.eigenval(self._W)[0]))

            self._W *= self._spectralRadius / _W_eigenvalue

            if verbose:
                M = self._leakingRate * self._W + (
                    1 - self._leakingRate) * B.identity(n=self._W.shape[0])
                M_eigenvalue = B.max(B.abs(B.eigenval(M)[0]))
                print("eff. spectral radius: {0}".format(M_eigenvalue))

            # randomly flip the sign of each weight
            random_signs = B.power(-1, B.randint(1, 3, (self.n_reservoir, self.n_reservoir)))

            self._W = B.multiply(self._W, random_signs)
        elif weightGeneration == "custom":
            pass
        else:
            raise ValueError(
                "The weightGeneration property must be one of the following values: naive, advanced, SORM, custom"
            )

        # check if the user is really using one of the internal methods, or wants to create W on their own
        if weightGeneration != "custom":
            self._createInputMatrix()

        # create the optional feedback matrix
        if feedback:
            self._WFeedback = B.rand(self.n_reservoir, 1 + self.n_output) - 0.5
            self._WFeedback *= self._feedbackScaling
        else:
            self._WFeedback = None
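
For the naive generation, the rescaling is exact because eigenvalues scale linearly with the matrix: dividing by the current largest magnitude and multiplying by the target sets the spectral radius precisely. A standalone NumPy check:

import numpy as np

# sparse uniform weights rescaled to a target spectral radius
rng = np.random.default_rng(5)
n, density, rho = 50, 0.2, 0.9
W = rng.uniform(-0.5, 0.5, (n, n))
W[rng.random((n, n)) > density] = 0.0
W *= rho / np.max(np.abs(np.linalg.eigvals(W)))
assert np.isclose(np.max(np.abs(np.linalg.eigvals(W))), rho)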
Example #14
    def __init__(
        self,
        n_input,
        n_reservoir,
        n_output,
        spectralRadius=1.0,
        noiseLevel=0.01,
        inputScaling=None,
        leakingRate=1.0,
        feedbackScaling=1.0,
        reservoirDensity=0.2,
        randomSeed=None,
        out_activation=lambda x: x,
        out_inverse_activation=lambda x: x,
        weightGeneration="naive",
        bias=1.0,
        outputBias=1.0,
        outputInputScaling=1.0,
        feedback=False,
        inputDensity=1.0,
        activation=B.tanh,
        activationDerivative=lambda x: 1.0 / B.cosh(x)**2,
    ):
        """Implementation of a ESN.

        Args:
            n_input : Dimensionality of the input.
            n_reservoir : Number of units in the reservoir.
            n_output : Dimensionality of the output.
            spectralRadius : Spectral radius of the reservoir's connection/weight matrix.
            noiseLevel : Magnitude of noise that is added to the input while fitting to prevent overfitting.
            inputScaling : Scaling factor of the input.
            leakingRate : Convex combination factor between 0 and 1 that weights current and new state value.
            feedbackScaling : Rescaling factor of the output-to-input feedback in the update process.
            reservoirDensity : Percentage of non-zero weight connections in the reservoir.
            randomSeed : Seed for random processes, e.g. weight initialization.
            out_activation : Final activation function (i.e. activation function of the output).
            out_inverse_activation : Inverse of the final activation function.
            weightGeneration : Algorithm to generate weight matrices. Choices: naive, SORM, advanced, custom
            bias : Size of the bias added for the internal update process.
            outputBias : Size of the bias added for the final linear regression of the output.
            outputInputScaling : Rescaling factor for the input of the ESN for the regression.
            feedback : Include output-input feedback in the ESN.
            inputDensity : Percentage of non-zero weights in the input-to-reservoir weight matrix.
            activation : (Non-linear) Activation function.
            activationDerivative : Derivative of the activation function.
        """

        self.n_input = n_input
        self.n_reservoir = n_reservoir
        self.n_output = n_output

        self._spectralRadius = spectralRadius
        self._noiseLevel = noiseLevel
        self._reservoirDensity = reservoirDensity
        self._leakingRate = leakingRate
        self._feedbackScaling = feedbackScaling
        self.inputDensity = inputDensity
        self._activation = activation
        self._activationDerivative = activationDerivative
        self._inputScaling = inputScaling

        if self._inputScaling is None:
            self._inputScaling = 1.0
        if np.isscalar(self._inputScaling):
            self._inputScaling = B.ones(n_input) * self._inputScaling
        else:
            if len(self._inputScaling) != self.n_input:
                raise ValueError(
                    "Dimension of inputScaling ({0}) does not match the input data dimension ({1})"
                    .format(len(self._inputScaling), n_input))
            self._inputScaling = B.array(inputScaling)

        self._expandedInputScaling = B.vstack(
            (B.array(1.0), self._inputScaling.reshape(-1, 1))).flatten()

        self.out_activation = out_activation
        self.out_inverse_activation = out_inverse_activation

        if randomSeed is not None:
            B.seed(randomSeed)
            np.random.seed(randomSeed)

        self._bias = bias
        self._outputBias = outputBias
        self._outputInputScaling = outputInputScaling
        self._createReservoir(weightGeneration, feedback)
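
A hypothetical construction call for this class. These snippets look like an easyesn-style implementation, so the import below assumes the constructor is exposed as easyesn.PredictionESN; adjust the import to the actual package layout:

# hypothetical usage; the import path is an assumption
from easyesn import PredictionESN

esn = PredictionESN(n_input=1, n_reservoir=200, n_output=1,
                    spectralRadius=0.9, leakingRate=0.2, randomSeed=42)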
Example #15
    def generate(self,
                 n,
                 inputData=None,
                 initialOutputData=None,
                 continuation=True,
                 initialData=None,
                 update_processor=lambda x: x,
                 verbose=0):
        #initialOutputData is the output of the last step BEFORE the generation shall start, e.g. the last step of the training's output

        #check the input data
        #if (self.n_input != self.n_output):
        #    raise ValueError("n_input does not equal n_output. The generation mode uses its own output as its input. Therefore, n_input has to be equal to n_output - please adjust these numbers!")

        if inputData is not None:
            inputData = B.array(inputData)

        if initialOutputData is not None:
            initialOutputData = B.array(initialOutputData)

        if initialData is not None:
            initialData = B.array(initialData)

        if initialOutputData is None and initialData is None:
            raise ValueError(
                "Either intitialOutputData or initialData must be different from None, as the network needs an initial output value"
            )

        if initialOutputData is None and initialData is not None:
            initialOutputData = initialData[1][-1]

        #let some input run through the ESN to initialize its states from a new starting value
        if not continuation:
            self._x = B.zeros(self._x.shape)

            if initialData is not None:
                if type(initialData) is tuple:
                    initialDataInput, initialDataOutput = initialData
                    if initialDataInput is not None and len(
                            initialDataInput) != len(initialDataOutput):
                        raise ValueError(
                            "Length of the inputData and the outputData of the initialData tuple do not match."
                        )
                else:
                    raise ValueError(
                        "initialData has to be a tuple consisting out of the input and the output data."
                    )

                for t in range(initialDataInput.shape[0]):
                    super(PredictionESN, self).update(initialDataInput[t],
                                                      initialDataOutput[t])

        if self.n_input != 0:
            if inputData is None:
                raise ValueError("inputData must not be None.")
            elif len(inputData) < n:
                raise ValueError("Length of inputData has to be >= n.")

        _, Y = self.propagate(inputData,
                              None,
                              verbose=verbose,
                              steps=n,
                              feedbackData=initialOutputData)
        Y = update_processor(Y)

        #return the result
        return Y.T
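
Generation runs the network closed-loop: each step's output becomes the next step's feedback, starting from initialOutputData. A toy scalar sketch of that pattern (the update is a stand-in, not the actual ESN step):

import numpy as np

# closed-loop generation pattern: previous output is next step's feedback
n_output, n_steps = 1, 5
y = np.zeros(n_output)                # plays the role of initialOutputData
outputs = np.empty((n_steps, n_output))
for t in range(n_steps):
    y = np.tanh(0.5 * y + 0.1)        # stand-in for one ESN update + readout
    outputs[t] = y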
Example #16
    def fit(self,
            inputData,
            outputData,
            transientTime="AutoReduce",
            transientTimeCalculationEpsilon=1e-3,
            transientTimeCalculationLength=20,
            verbose=0):
        #check the input data
        if self.n_input != 0:
            if len(inputData.shape) == 3 and len(outputData.shape) > 1:
                #multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
                if inputData.shape[0] != outputData.shape[0]:
                    raise ValueError(
                        "Amount of input and output datasets is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData.shape[0]))
                if inputData.shape[1] != outputData.shape[1]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[1], outputData.shape[1]))
            else:
                if inputData.shape[0] != outputData.shape[0]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData.shape[0]))
        else:
            if inputData is not None:
                raise ValueError(
                    "n_input has been set to zero. Therefore, the given inputData will not be used."
                )

        if inputData is not None:
            inputData = B.array(inputData)
        if outputData is not None:
            outputData = B.array(outputData)

        #reshape the input/output data to have the shape (timeseries, time, dimension)
        if len(outputData.shape) <= 2:
            outputData = outputData.reshape((1, -1, self.n_output))
        if inputData is not None:
            if len(inputData.shape) <= 2:
                inputData = inputData.reshape((1, -1, self.n_input))

        self.resetState()

        # Automatic transient time calculations
        if transientTime == "Auto":
            transientTime = self.calculateTransientTime(
                inputData[0] if inputData is not None else None,
                outputData[0], transientTimeCalculationEpsilon,
                transientTimeCalculationLength)
        if transientTime == "AutoReduce":
            if (inputData is None and outputData.shape[2] == 1) or (
                    inputData is not None and inputData.shape[2] == 1):
                transientTime = self.calculateTransientTime(
                    inputData[0] if inputData is not None else None,
                    outputData[0], transientTimeCalculationEpsilon,
                    transientTimeCalculationLength)
                transientTime = self.reduceTransientTime(
                    inputData[0] if inputData is not None else None,
                    outputData[0], transientTime)
            else:
                print(
                    "Transient time reduction is supported only for 1 dimensional input."
                )
                # fall back to the plain automatic calculation so that
                # transientTime is a number from here on
                transientTime = self.calculateTransientTime(
                    inputData[0] if inputData is not None else None,
                    outputData[0], transientTimeCalculationEpsilon,
                    transientTimeCalculationLength)

        if inputData is not None:
            partialLength = (inputData.shape[1] - transientTime)
            totalLength = inputData.shape[0] * partialLength
            timeseriesCount = inputData.shape[0]
        elif outputData is not None:
            partialLength = (outputData.shape[1] - transientTime)
            totalLength = outputData.shape[0] * partialLength
            timeseriesCount = outputData.shape[0]
        else:
            raise ValueError("Either input or output data must not to be None")

        self._X = B.empty((1 + self.n_input + self.n_reservoir, totalLength))

        if (verbose > 0):
            bar = progressbar.ProgressBar(max_value=totalLength,
                                          redirect_stdout=True,
                                          poll_interval=0.0001)
            bar.update(0)

        for i in range(timeseriesCount):
            if inputData is not None:
                xx, yy = self.propagate(inputData[i], outputData[i],
                                        transientTime, verbose - 1)
                self._X[:, i * partialLength:(i + 1) * partialLength] = xx
            else:
                xx, yy = self.propagate(None, outputData[i], transientTime,
                                        verbose - 1)
                self._X[:, i * partialLength:(i + 1) * partialLength] = xx
            if (verbose > 0):
                bar.update(i)
        if (verbose > 0):
            bar.finish()

        #define the target values
        Y_target = B.empty((outputData.shape[2], totalLength))
        for i in range(timeseriesCount):
            Y_target[:, i * partialLength:(i + 1) *
                     partialLength] = self.out_inverse_activation(
                         outputData[i]).T[:, transientTime:]

        if (self._solver == "pinv"):
            self._WOut = B.dot(Y_target, B.pinv(self._X))

            #calculate the training prediction now
            train_prediction = self.out_activation((B.dot(self._WOut,
                                                          self._X)).T)

        # elif (self._solver == "lsqr"):
        #     X_T = self._X.T
        #     self._WOut = B.dot(B.dot(Y_target, X_T),B.inv(B.dot(self._X,X_T) + self._regressionParameters[0]*B.identity(1+self.n_input+self.n_reservoir)))

        #     """
        #         #alternative representation of the equation

        #         Xt = X.T

        #         A = np.dot(X, Y_target.T)

        #         B = np.linalg.inv(np.dot(X, Xt)  + regression_parameter*np.identity(1+self.n_input+self.n_reservoir))

        #         self._WOut = np.dot(B, A)
        #         self._WOut = self._WOut.T
        #     """

        #     #calculate the training prediction now
        #     train_prediction = self.out_activation(B.dot(self._WOut, self._X).T)

        elif (self._solver in [
                "sklearn_auto", "sklearn_lsqr", "sklearn_sag", "sklearn_svd"
        ]):
            mode = self._solver[8:]
            params = self._regressionParameters
            params["solver"] = mode
            self._ridgeSolver = Ridge(**params)

            self._ridgeSolver.fit(self._X.T, Y_target.T)

            #calculate the training prediction now
            train_prediction = self.out_activation(
                self._ridgeSolver.predict(self._X.T))

        elif (self._solver in ["sklearn_svr", "sklearn_svc"]):
            self._ridgeSolver = SVR(**self._regressionParameters)

            self._ridgeSolver.fit(self._X.T, Y_target.T.ravel())

            #calculate the training prediction now
            train_prediction = self.out_activation(
                self._ridgeSolver.predict(self._X.T))

        #calculate the training error now
        #flatten the outputData
        outputData = outputData[:, transientTime:, :].reshape(totalLength, -1)
        training_error = B.sqrt(B.mean((train_prediction - outputData)**2))
        return training_error
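
The sklearn solver strings are mapped by stripping the "sklearn_" prefix and passing the remainder to Ridge as its solver argument. A minimal sketch of that mapping (the alpha value is an assumed regression parameter):

from sklearn.linear_model import Ridge

# "sklearn_lsqr"[8:] == "lsqr" selects the corresponding Ridge solver
params = {"alpha": 1e-2}  # assumed regression parameter
params["solver"] = "sklearn_lsqr"[8:]
ridge = Ridge(**params)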
Example #17
    def _createReservoir(self, weightGeneration, feedback=False, verbose=False):
        #naive generation of the matrix W by using random weights
        if weightGeneration == 'naive':
            #random weight matrix from -0.5 to 0.5
            self._W = B.array(rnd.rand(self.n_reservoir, self.n_reservoir) - 0.5)

            #set sparseness% to zero
            mask = rnd.rand(self.n_reservoir, self.n_reservoir) > self._reservoirDensity
            self._W[mask] = 0.0

            _W_eigenvalues = B.abs(B.eigenval(self._W)[0])
            self._W *= self._spectralRadius / B.max(_W_eigenvalues)

        #generation using the SORM technique (see http://ftp.math.uni-rostock.de/pub/preprint/2012/pre12_01.pdf)
        elif weightGeneration == "SORM":
            self._W = B.identity(self.n_reservoir)

            number_nonzero_elements = self._reservoirDensity * self.n_reservoir * self.n_reservoir

            while np.count_nonzero(self._W) < number_nonzero_elements:
                Q = self.create_random_rotation_matrix()
                self._W = Q.dot(self._W)
            
            self._W *= self._spectralRadius

        #generation using the proposed method of Yildiz
        elif weightGeneration == 'advanced':
            #to create W we must follow some steps:
            #at first, create a W = |W|
            #make it sparse
            #then scale its spectral radius to rho(W) = 1 (according to Yildiz with x(n+1) = (1-a)*x(n)+a*f(...))
            #then change randomly the signs of the matrix

            #random weight matrix from 0 to 0.5

            self._W = B.array(rnd.rand(self.n_reservoir, self.n_reservoir) / 2)

            #set sparseness% to zero
            mask = B.rand(self.n_reservoir, self.n_reservoir) > self._reservoirDensity
            self._W[mask] = 0.0

            from scipy.sparse.linalg import ArpackNoConvergence
            #just calculate the largest EV - hopefully this is the right code to do so...
            try:
                #this is just a good approximation, so this code might fail
                _W_eigenvalue = B.max(np.abs(sp.sparse.linalg.eigs(self._W, k=1)[0]))
            except ArpackNoConvergence:
                #this is the safe fall back method to calculate the EV
                _W_eigenvalue = B.max(B.abs(sp.linalg.eigvals(self._W)))
            #_W_eigenvalue = B.max(B.abs(np.linalg.eig(self._W)[0]))

            self._W *= self._spectralRadius / _W_eigenvalue

            if verbose:
                M = self._leakingRate*self._W + (1 - self._leakingRate)*np.identity(n=self._W.shape[0])
                M_eigenvalue = B.max(B.abs(B.eigenval(M)[0]))
                print("eff. spectral radius: {0}".format(M_eigenvalue))

            #randomly flip the sign of each weight
            random_signs = B.power(-1, rnd.randint(1, 3, (self.n_reservoir, self.n_reservoir)))

            self._W = B.multiply(self._W, random_signs)
        elif weightGeneration == 'custom':
            pass
        else:
            raise ValueError("The weightGeneration property must be one of the following values: naive, advanced, SORM, custom")

        #check if the user is really using one of the internal methods, or wants to create W on their own
        if (weightGeneration != 'custom'):
            self._createInputMatrix()

        #create the optional feedback matrix
        if feedback:
            self._WFeedback = B.rand(self.n_reservoir, 1 + self.n_output) - 0.5
            self._WFeedback *= self._feedbackScaling
        else:
            self._WFeedback = None
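
The SORM construction works because a product of random plane rotations stays orthogonal: every eigenvalue keeps magnitude 1 while the matrix fills in, so only the density changes until the final multiplication by the spectral radius. A standalone NumPy sketch of that property:

import numpy as np

# a product of random plane (Givens) rotations is orthogonal:
# all eigenvalue magnitudes stay exactly 1 while density grows
def random_rotation(n, rng):
    i, j = rng.choice(n, size=2, replace=False)
    phi = rng.uniform(0.0, 2.0 * np.pi)
    Q = np.identity(n)
    Q[i, i] = Q[j, j] = np.cos(phi)
    Q[i, j], Q[j, i] = np.sin(phi), -np.sin(phi)
    return Q

rng = np.random.default_rng(7)
W = np.identity(8)
for _ in range(10):
    W = random_rotation(8, rng) @ W
assert np.allclose(np.abs(np.linalg.eigvals(W)), 1.0)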