Example no. 1
0
def autocorrelation(self, x):
    """Return the normalized autocorrelation of the 1-D series ``x``.

    The series is mean-centered, correlated with itself, and each lag-k
    term is divided by the series variance and by the number of
    overlapping samples (n - k) that contributed to it, so the lag-0
    value is 1 for any non-constant input.

    Parameters
    ----------
    x : 1-D backend array
        The time series to autocorrelate.  NOTE(review): a constant
        series has zero variance and would divide by zero — confirm
        callers never pass one.

    Returns
    -------
    1-D backend array of length ``n`` with the autocorrelation at
    lags 0 .. n-1.
    """
    n = x.shape[0]
    variance = B.var(x)
    x = x - B.mean(x)
    # full cross-correlation has length 2n-1; the last n entries are the
    # non-negative lags 0 .. n-1.
    # (The previous version re-derived these values with an O(n^2)
    # per-call `assert`; that debug check is removed here.)
    r = B.correlate(x, x, mode="full")[-n:]
    # normalize each lag by variance and its overlap count n, n-1, ..., 1
    result = r / (variance * (B.arange(n, 0, -1)))
    return result
Example no. 2
0
    def predict_loop(self,
                     inputData,
                     outputData1,
                     outputData2,
                     continuation=True,
                     initialData=None,
                     update_processor=lambda x: x,
                     verbose=0):
        """Step-by-step (closed-loop) prediction over the layer-1 ESN.

        Each input sample is augmented with a layer-2 output vector and
        propagated one step at a time through ``self._esn1`` with
        learning disabled; the processed layer-1 outputs are collected.

        Parameters
        ----------
        inputData : array-like, shape (time, n_input)
            Input sequence; converted to a backend array.
        outputData1 : array, shape (time, n_output1)
            Layer-1 targets, used only to report an RMSE at the end.
        outputData2 : array
            Unused here — the layer-2 update inside the loop is
            commented out.
        continuation : bool
            If False, both reservoir states are reset to zero first.
        initialData : unused in this implementation.
        update_processor : callable
            Post-processing applied to each layer-1 output.
        verbose : int
            Verbosity forwarded (decremented) to the ESN propagation.

        Returns
        -------
        (aggregated_y1, aggregated_y2)
            NOTE(review): ``aggregated_y2`` is allocated with
            ``B.empty`` and never written in the loop, so it holds
            arbitrary values — confirm whether layer 2 is intentionally
            disabled here.
        """
        inputData = B.array(inputData)

        #let some input run through the ESN to initialize its states from a new starting value
        if (not continuation):
            self._esn1._x = B.zeros(self._esn1._x.shape)
            self._esn2._x = B.zeros(self._esn2._x.shape)

        total_length = inputData.shape[0]
        aggregated_y1 = B.empty((total_length, self._n_output1))
        aggregated_y2 = B.empty((total_length, self._n_output2))

        # X1, y1 = self._esn1.propagate(inputData=inputData, outputData=None, transientTime=self._transientTime, verbose=verbose-1)
        # Y1 = B.dot(self._esn1._WOut, X1)
        # Y1 = update_processor(self.out_activation(Y1)).T

        # layer-2 output computed ONCE from the stored layer-2 design
        # matrix; it is NOT updated inside the loop below.
        y2 = self.out_activation(B.dot(self._esn2._WOut, self._esn2._X).T)

        for i in range(total_length):

            # concatenate the current input with the (static) layer-2
            # output as feedback
            inputDataWithFeedback = B.zeros((self.n_input + self._n_output2))
            inputDataWithFeedback[:self.n_input] = inputData[i, :]
            inputDataWithFeedback[self.n_input:] = y2

            #update models
            X1, _ = self._esn1.propagateOneStep(
                inputData=inputDataWithFeedback,
                outputData=None,
                step=i,
                transientTime=self._transientTime,
                verbose=verbose - 1,
                learn=False)
            y1 = B.dot(self._esn1._WOut, X1)
            aggregated_y1[i, :] = update_processor(self.out_activation(y1)).T

            #output from 1st layer and correct output
            #input2 = outputData1[i] - y1.reshape(self._n_output1)
            # input2 = B.vstack((y1.reshape(self._n_output1), outputData1[i]))
            # x2, y2 = self._esn2.propagateOneStep(inputData=input2, outputData=None, step=i, transientTime=self._transientTime, verbose=verbose-1, learn=False)
            # Y_target2[i,:]  = y2.reshape(self._n_output2)

        # RMSE of the layer-1 predictions against the given targets
        training_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))

        print("training errors")
        print(training_error1)

        return aggregated_y1, aggregated_y2
Example no. 3
0
    def predict(self,
                inputData,
                outputData1,
                outputData2,
                continuation=True,
                initialData=None,
                update_processor=lambda x: x,
                verbose=0):
        """Batch (teacher-forced) prediction over the layer-1 ESN.

        Unlike ``predict_loop``, the whole sequence is propagated in one
        call, with ``outputData2`` supplied directly as the feedback
        channel (teacher forcing) instead of a model-generated signal.

        Parameters
        ----------
        inputData : array-like, shape (time, n_input)
            Input sequence; converted to a backend array.
        outputData1 : array, shape (time, n_output1)
            Layer-1 targets, used only to report an RMSE.
        outputData2 : array, shape (time, n_output2)
            Ground-truth layer-2 signal concatenated onto the input.
        continuation : bool
            If False, both reservoir states are reset to zero first.
        initialData : unused in this implementation.
        update_processor : callable
            Post-processing applied to the layer-1 outputs.
        verbose : int
            Verbosity forwarded (decremented) to the ESN propagation.

        Returns
        -------
        (aggregated_y1, aggregated_y2)
            NOTE(review): ``aggregated_y2`` is allocated with
            ``B.empty`` and never written — it holds arbitrary values.
        """
        inputData = B.array(inputData)

        #let some input run through the ESN to initialize its states from a new starting value
        if (not continuation):
            self._esn1._x = B.zeros(self._esn1._x.shape)
            self._esn2._x = B.zeros(self._esn2._x.shape)

        total_length = inputData.shape[0]
        # NOTE(review): this preallocation of aggregated_y1 is dead —
        # it is unconditionally replaced by the B.dot result below.
        aggregated_y1 = B.empty((total_length, self._n_output1))
        aggregated_y2 = B.empty((total_length, self._n_output2))

        #put pixels and switch data together
        inputDataWithFeedback = B.zeros(
            (total_length, self.n_input + self._n_output2))
        inputDataWithFeedback[:, :self.n_input] = inputData
        inputDataWithFeedback[:, self.n_input:] = outputData2

        X1, _ = self._esn1.propagate(inputData=inputDataWithFeedback,
                                     outputData=None,
                                     transientTime=self._transientTime,
                                     verbose=verbose - 1)
        # linear read-out, then activation and user post-processing
        aggregated_y1 = B.dot(self._esn1._WOut, X1)
        aggregated_y1 = update_processor(self.out_activation(aggregated_y1)).T

        # RMSE of the layer-1 predictions against the given targets
        training_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))

        print("predict errors")
        print(training_error1)

        return aggregated_y1, aggregated_y2
Example no. 4
0
    def fit(self,
            inputData,
            outputData,
            transientTime="AutoReduce",
            transientTimeCalculationEpsilon=1e-3,
            transientTimeCalculationLength=20,
            verbose=0):
        """Train the ESN read-out weights on one or more time series.

        Propagates the input(s) through the reservoir, collects the
        extended states into ``self._X``, and solves for ``self._WOut``
        with the configured solver (``pinv`` or one of the sklearn
        ridge/SVR variants).

        Parameters
        ----------
        inputData : array or None
            Either (time, dim) for a single series or
            (timeseries, time, dim) for several; must be None when
            ``self.n_input == 0``.
        outputData : array
            Targets with matching leading dimensions.
        transientTime : int or "Auto" or "AutoReduce"
            Number of initial steps to discard, or a strategy name to
            estimate it automatically.
        transientTimeCalculationEpsilon : float
            Tolerance used by the automatic transient estimation.
        transientTimeCalculationLength : int
            Window length used by the automatic transient estimation.
        verbose : int
            If > 0, shows a progress bar.

        Returns
        -------
        float
            Training RMSE.

        Raises
        ------
        ValueError
            On mismatched input/output lengths, or when inputData is
            given despite ``n_input == 0``, or when both are None.
        """
        #check the input data
        if self.n_input != 0:
            if len(inputData.shape) == 3 and len(outputData.shape) > 1:
                #multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
                if inputData.shape[0] != outputData.shape[0]:
                    raise ValueError(
                        "Amount of input and output datasets is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData.shape[0]))
                if inputData.shape[1] != outputData.shape[1]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[1], outputData.shape[1]))
            else:
                if inputData.shape[0] != outputData.shape[0]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData.shape[0]))
        else:
            if inputData is not None:
                raise ValueError(
                    "n_input has been set to zero. Therefore, the given inputData will not be used."
                )

        if inputData is not None:
            inputData = B.array(inputData)
        if outputData is not None:
            outputData = B.array(outputData)

        #reshape the input/output data to have the shape (timeseries, time, dimension)
        if len(outputData.shape) <= 2:
            outputData = outputData.reshape((1, -1, self.n_output))
        if inputData is not None:
            if len(inputData.shape) <= 2:
                inputData = inputData.reshape((1, -1, self.n_input))

        self.resetState()

        # Automatic transient time calculations
        if transientTime == "Auto":
            transientTime = self.calculateTransientTime(
                inputData[0], outputData[0], transientTimeCalculationEpsilon,
                transientTimeCalculationLength)
        if transientTime == "AutoReduce":
            # NOTE(review): if inputData is None and the output is
            # multi-dimensional, the `or` still evaluates
            # inputData.shape[2] and raises AttributeError; also
            # calculateTransientTime would index None — confirm this
            # path is never taken with inputData=None.
            if (inputData is None
                    and outputData.shape[2] == 1) or inputData.shape[2] == 1:
                transientTime = self.calculateTransientTime(
                    inputData[0], outputData[0],
                    transientTimeCalculationEpsilon,
                    transientTimeCalculationLength)
                transientTime = self.reduceTransientTime(
                    inputData[0], outputData[0], transientTime)
            else:
                # NOTE(review): transientTime stays the string
                # "AutoReduce" here, which breaks the arithmetic below —
                # callers with multi-dim input must pass a number.
                print(
                    "Transient time reduction is supported only for 1 dimensional input."
                )

        # per-series usable length after discarding the transient
        if inputData is not None:
            partialLength = (inputData.shape[1] - transientTime)
            totalLength = inputData.shape[0] * partialLength
            timeseriesCount = inputData.shape[0]
        elif outputData is not None:
            partialLength = (outputData.shape[1] - transientTime)
            totalLength = outputData.shape[0] * partialLength
            timeseriesCount = outputData.shape[0]
        else:
            raise ValueError("Either input or output data must not to be None")

        # design matrix: bias + input + reservoir state per column/step
        self._X = B.empty((1 + self.n_input + self.n_reservoir, totalLength))

        if (verbose > 0):
            bar = progressbar.ProgressBar(max_value=totalLength,
                                          redirect_stdout=True,
                                          poll_interval=0.0001)
            bar.update(0)

        # collect reservoir states of each series into its slice of _X
        for i in range(timeseriesCount):
            if inputData is not None:
                xx, yy = self.propagate(inputData[i], outputData[i],
                                        transientTime, verbose - 1)
                self._X[:, i * partialLength:(i + 1) * partialLength] = xx
            else:
                xx, yy = self.propagate(None, outputData[i], transientTime,
                                        verbose - 1)
                self._X[:, i * partialLength:(i + 1) * partialLength] = xx
            if (verbose > 0):
                bar.update(i)
        if (verbose > 0):
            bar.finish()

        #define the target values
        Y_target = B.empty((outputData.shape[2], totalLength))
        for i in range(timeseriesCount):
            Y_target[:, i * partialLength:(i + 1) *
                     partialLength] = self.out_inverse_activation(
                         outputData[i]).T[:, transientTime:]

        # solve Y_target = WOut @ X for WOut
        if (self._solver == "pinv"):
            self._WOut = B.dot(Y_target, B.pinv(self._X))

            #calculate the training prediction now
            train_prediction = self.out_activation((B.dot(self._WOut,
                                                          self._X)).T)

        # elif (self._solver == "lsqr"):
        #     X_T = self._X.T
        #     self._WOut = B.dot(B.dot(Y_target, X_T),B.inv(B.dot(self._X,X_T) + self._regressionParameters[0]*B.identity(1+self.n_input+self.n_reservoir)))

        #     """
        #         #alternative representation of the equation

        #         Xt = X.T

        #         A = np.dot(X, Y_target.T)

        #         B = np.linalg.inv(np.dot(X, Xt)  + regression_parameter*np.identity(1+self.n_input+self.n_reservoir))

        #         self._WOut = np.dot(B, A)
        #         self._WOut = self._WOut.T
        #     """

        #     #calculate the training prediction now
        #     train_prediction = self.out_activation(B.dot(self._WOut, self._X).T)

        elif (self._solver in [
                "sklearn_auto", "sklearn_lsqr", "sklearn_sag", "sklearn_svd"
        ]):
            # strip the "sklearn_" prefix to obtain the Ridge solver name
            mode = self._solver[8:]
            params = self._regressionParameters
            params["solver"] = mode
            self._ridgeSolver = Ridge(**params)

            self._ridgeSolver.fit(self._X.T, Y_target.T)

            #calculate the training prediction now
            train_prediction = self.out_activation(
                self._ridgeSolver.predict(self._X.T))

        elif (self._solver in ["sklearn_svr", "sklearn_svc"]):
            self._ridgeSolver = SVR(**self._regressionParameters)

            # SVR expects a 1-D target vector
            self._ridgeSolver.fit(self._X.T, Y_target.T.ravel())

            #calculate the training prediction now
            train_prediction = self.out_activation(
                self._ridgeSolver.predict(self._X.T))

        # NOTE(review): an unrecognized solver leaves train_prediction
        # unbound and raises NameError below — confirm solver names are
        # validated upstream.
        #calculate the training error now
        #flatten the outputData
        outputData = outputData[:, transientTime:, :].reshape(totalLength, -1)
        training_error = B.sqrt(B.mean((train_prediction - outputData)**2))
        return training_error
Example no. 5
0
    def fit(
        self,
        inputData,
        outputData,
        transientTime="AutoReduce",
        transientTimeCalculationEpsilon=1e-3,
        transientTimeCalculationLength=20,
        verbose=0,
    ):
        """Train the read-out on multiple sequences with per-sequence labels.

        Each of the ``nSequences`` input sequences is propagated from a
        zeroed reservoir; its states are stacked into ``self._X`` and the
        corresponding output row of ``outputData`` is tiled across the
        remaining time steps as the regression target.

        Parameters
        ----------
        inputData : array, shape (nSequences, time, ...)
            Batch of input sequences.
        outputData : array, shape (nSequences, n_output)
            One target vector per sequence (tiled over time below) —
            presumably a classification-style label; confirm against
            callers.
        transientTime : int or "Auto" or "AutoReduce"
            Initial steps to discard, or an estimation strategy.
        transientTimeCalculationEpsilon : float
            Tolerance for the automatic transient estimation.
        transientTimeCalculationLength : int
            Window length for the automatic transient estimation.
        verbose : int
            If > 0, shows a progress bar.

        Returns
        -------
        float
            Training RMSE of the time-averaged predictions.

        Raises
        ------
        ValueError
            If the input and output batch sizes differ.
        """
        # check the input data
        if inputData.shape[0] != outputData.shape[0]:
            raise ValueError(
                "Amount of input and output datasets is not equal - {0} != {1}"
                .format(inputData.shape[0], outputData.shape[0]))

        nSequences = inputData.shape[0]
        trainingLength = inputData.shape[1]

        self._x = B.zeros((self.n_reservoir, 1))

        # Automatic transient time calculations
        if transientTime == "Auto":
            transientTime = self.calculateTransientTime(
                inputData,
                outputData,
                transientTimeCalculationEpsilon,
                transientTimeCalculationLength,
            )
        if transientTime == "AutoReduce":
            # NOTE(review): if inputData is None the `or` still
            # evaluates inputData.shape[1] and raises AttributeError
            # (short-circuit only skips it when the first operand is
            # True) — confirm inputData is never None here.
            if (inputData is None
                    and outputData.shape[1] == 1) or inputData.shape[1] == 1:
                transientTime = self.calculateTransientTime(
                    inputData,
                    outputData,
                    transientTimeCalculationEpsilon,
                    transientTimeCalculationLength,
                )
                transientTime = self.reduceTransientTime(
                    inputData, outputData, transientTime)
            else:
                # NOTE(review): transientTime remains the string
                # "AutoReduce" on this path, which breaks the
                # arithmetic below — pass a number for multi-dim input.
                print(
                    "Transient time reduction is supported only for 1 dimensional input."
                )

        # design matrix: bias + input + reservoir state per column/step
        self._X = B.zeros((
            1 + self.n_input + self.n_reservoir,
            nSequences * (trainingLength - transientTime),
        ))
        Y_target = B.zeros(
            (self.n_output, (trainingLength - transientTime) * nSequences))

        if verbose > 0:
            bar = progressbar.ProgressBar(max_value=len(inputData),
                                          redirect_stdout=True,
                                          poll_interval=0.0001)
            bar.update(0)

        for n in range(len(inputData)):
            # reset the reservoir for each sequence so states do not
            # leak between sequences
            self._x = B.zeros((self.n_reservoir, 1))
            self._X[:, n * (trainingLength - transientTime):(n + 1) *
                    (trainingLength - transientTime), ] = self.propagate(
                        inputData[n], transientTime=transientTime, verbose=0)
            # set the target values: the single per-sequence label is
            # repeated for every retained time step
            Y_target[:, n * (trainingLength - transientTime):(n + 1) *
                     (trainingLength - transientTime), ] = np.tile(
                         self.out_inverse_activation(outputData[n]),
                         trainingLength - transientTime,
                     ).T

            if verbose > 0:
                bar.update(n)

        if verbose > 0:
            bar.finish()

        # solve Y_target = WOut @ X for WOut
        if self._solver == "pinv":
            self._WOut = B.dot(Y_target, B.pinv(self._X))

            # calculate the training prediction now
            train_prediction = self.out_activation((B.dot(self._WOut,
                                                          self._X)).T)

        elif self._solver == "lsqr":
            # ridge regression in closed form
            X_T = self._X.T
            self._WOut = B.dot(
                B.dot(Y_target, X_T),
                B.inv(
                    B.dot(self._X, X_T) + self._regressionParameters[0] *
                    B.identity(1 + self.n_input + self.n_reservoir)),
            )
            """
                #alternative represantation of the equation

                Xt = X.T

                A = np.dot(X, Y_target.T)

                B = np.linalg.inv(np.dot(X, Xt)  + regression_parameter*np.identity(1+self.n_input+self.n_reservoir))

                self._WOut = np.dot(B, A)
                self._WOut = self._WOut.T
            """

            # calculate the training prediction now
            train_prediction = self.out_activation(
                B.dot(self._WOut, self._X).T)

        elif self._solver in [
                "sklearn_auto",
                "sklearn_lsqr",
                "sklearn_sag",
                "sklearn_svd",
        ]:
            # strip the "sklearn_" prefix to obtain the Ridge solver name
            mode = self._solver[8:]
            params = self._regressionParameters
            params["solver"] = mode
            self._ridgeSolver = Ridge(**params)

            self._ridgeSolver.fit(self._X.T, Y_target.T)

            # calculate the training prediction now
            train_prediction = self.out_activation(
                self._ridgeSolver.predict(self._X.T))

        elif self._solver in ["sklearn_svr", "sklearn_svc"]:
            self._ridgeSolver = SVR(**self._regressionParameters)

            # SVR expects a 1-D target vector
            self._ridgeSolver.fit(self._X.T, Y_target.T.flatten())

            # calculate the training prediction now
            train_prediction = self.out_activation(
                self._ridgeSolver.predict(self._X.T))

        # average the per-step predictions over time (axis 0) so the
        # result is comparable with the per-sequence targets
        train_prediction = np.mean(train_prediction, 0)

        # calculate the training error now
        training_error = B.sqrt(B.mean((train_prediction - outputData.T)**2))
        return training_error
Example no. 6
0
    def fit_loop(self,
                 inputData,
                 outputData1,
                 outputData2,
                 transientTime="AutoReduce",
                 transientTimeCalculationEpsilon=1e-3,
                 transientTimeCalculationLength=20,
                 verbose=0):
        """Online (step-by-step) training of the layer-1 ESN.

        Each input sample is augmented with a layer-2 output vector and
        propagated one step through ``self._esn1`` with ``learn=True``,
        collecting the pre-update ("untrained") layer-1 predictions.

        Parameters
        ----------
        inputData : array, shape (time, n_input)
            Input sequence.
        outputData1 : array, shape (time, n_output1)
            Layer-1 targets used for the online update.
        outputData2 : array, shape (time, n_output2)
            Layer-2 targets; NOTE(review): not used in the active code —
            the layer-2 update inside the loop is commented out.
        transientTime : int or str
            Stored on self and forwarded to propagateOneStep.
        transientTimeCalculationEpsilon, transientTimeCalculationLength :
            Unused in this implementation.
        verbose : int
            If > 0, shows a progress bar.

        Returns
        -------
        (training_error1, training_error2)
            NOTE(review): ``aggregated_y2`` is never written, so
            ``training_error2`` is computed from uninitialized memory —
            confirm whether layer 2 is intentionally disabled.
        """
        #check the input data
        if self.n_input != 0:
            if len(inputData.shape) == 3 and len(outputData1.shape) > 1:
                #multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output datasets is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
                if inputData.shape[1] != outputData1.shape[1]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[1], outputData1.shape[1]))
            else:
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
        else:
            if inputData is not None:
                raise ValueError(
                    "n_input has been set to zero. Therefore, the given inputData will not be used."
                )

        inputData = B.array(inputData)
        outputData1 = B.array(outputData1)
        outputData2 = B.array(outputData2)
        # remembered so predict/predict_loop can reuse the same value
        self._transientTime = transientTime

        self._esn1.resetState()
        self._esn2.resetState()

        total_length = inputData.shape[0]
        print("total_length ", total_length)

        if (verbose > 0):
            bar = progressbar.ProgressBar(max_value=total_length,
                                          redirect_stdout=True,
                                          poll_interval=0.0001)
            bar.update(0)

        #should be named aggregated_y
        aggregated_y1 = B.empty((outputData1.shape))
        aggregated_y2 = B.empty((outputData2.shape))

        # X1, _ = self._esn1.propagate(inputData=inputData, outputData=outputData1, transientTime=transientTime, verbose=verbose-1)
        # self._esn1._X = X1

        # Y_target = self.out_inverse_activation(outputData1).T
        # self._esn1._WOut = B.dot(Y_target, B.pinv(X1))
        # y1 = self.out_activation((B.dot(self._esn1._WOut, X1)).T)
        # training_error1 = B.sqrt(B.mean((y1.reshape((total_length, self._n_output1))- outputData1)**2))
        # training_error2 = 0

        # layer-2 output computed ONCE before the loop; it is NOT
        # updated inside the loop below.
        y2 = self.out_activation(B.dot(self._esn2._WOut, self._esn2._X).T)

        for i in range((total_length)):

            #input: input + output from layer 2
            inputDataWithFeedback = B.zeros((self.n_input + self._n_output2))
            inputDataWithFeedback[:self.n_input] = inputData[i, :]
            inputDataWithFeedback[self.n_input:] = y2

            #update models
            X1, untrained_y1 = self._esn1.propagateOneStep(
                inputData=inputDataWithFeedback,
                outputData=outputData1[i],
                step=i,
                transientTime=transientTime,
                verbose=verbose - 1,
                learn=True)
            #self._esn1._X = X1
            #y after model update (ideally we would use y before model update)
            #y1 = self.out_activation((B.dot(self._esn1._WOut, X1)).T)
            aggregated_y1[i, :] = untrained_y1.reshape(self._n_output1)

            #output from 1st layer and correct output
            # input2 = B.vstack((y1.reshape(self._n_output1), outputData1[i]))
            # x2, y2 = self._esn2.propagateOneStep(inputData=input2, outputData=outputData2[i], step=i, transientTime=transientTime, verbose=verbose-1, learn=True)
            # Y_target2[i,:]  = y2.reshape(self._n_output2)

            if (verbose > 0):
                bar.update(i)
        if (verbose > 0):
            bar.finish()

        self._training_res = aggregated_y1
        training_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))
        training_error2 = B.sqrt(B.mean((aggregated_y2 - outputData2)**2))

        print("training errors")
        print(training_error1)
        print(training_error2)

        return training_error1, training_error2
Example no. 7
0
    def fit(self,
            inputData,
            outputData1,
            outputData2,
            transientTime="AutoReduce",
            transientTimeCalculationEpsilon=1e-3,
            transientTimeCalculationLength=20,
            verbose=0):
        """Batch training of the layer-1 ESN read-out.

        Propagates the inputs (augmented with the ground-truth layer-2
        signal, i.e. teacher forcing) through layer 1 in one call and
        solves for the layer-1 read-out via the pseudo-inverse.

        Parameters
        ----------
        inputData : array, shape (time, n_input)
            Input sequence.
        outputData1 : array, shape (time, n_output1)
            Layer-1 regression targets.
        outputData2 : array, shape (time, n_output2)
            Ground-truth layer-2 signal used as the feedback channel.
        transientTime : int or str
            Stored on self and forwarded to propagate.
        transientTimeCalculationEpsilon, transientTimeCalculationLength :
            Unused in this implementation.
        verbose : int
            Verbosity forwarded (decremented) to the ESN propagation.

        Returns
        -------
        (training_error1, training_error2)
            NOTE(review): ``aggregated_y2`` is never written, so
            ``training_error2`` is computed from uninitialized memory.
        """
        #check the input data
        if self.n_input != 0:
            if len(inputData.shape) == 3 and len(outputData1.shape) > 1:
                #multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output datasets is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
                if inputData.shape[1] != outputData1.shape[1]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[1], outputData1.shape[1]))
            else:
                if inputData.shape[0] != outputData1.shape[0]:
                    raise ValueError(
                        "Amount of input and output time steps is not equal - {0} != {1}"
                        .format(inputData.shape[0], outputData1.shape[0]))
        else:
            if inputData is not None:
                raise ValueError(
                    "n_input has been set to zero. Therefore, the given inputData will not be used."
                )

        inputData = B.array(inputData)
        outputData1 = B.array(outputData1)
        outputData2 = B.array(outputData2)
        # remembered so predict/predict_loop can reuse the same value
        self._transientTime = transientTime

        self._esn1.resetState()
        self._esn2.resetState()

        total_length = inputData.shape[0]
        print("total_length ", total_length)

        aggregated_y1 = B.empty((outputData1.shape))
        aggregated_y2 = B.empty((outputData2.shape))

        #put pixels and switch data together
        inputDataWithFeedback = B.zeros(
            (total_length, self.n_input + self._n_output2))
        inputDataWithFeedback[:, :self.n_input] = inputData
        inputDataWithFeedback[:, self.n_input:] = outputData2

        X1, _ = self._esn1.propagate(inputData=inputDataWithFeedback,
                                     outputData=outputData1,
                                     transientTime=transientTime,
                                     verbose=verbose - 1)
        self._esn1._X = X1

        # closed-form read-out via pseudo-inverse: WOut = Y_target X^+
        Y_target = self.out_inverse_activation(outputData1).T
        self._esn1._WOut = B.dot(Y_target, B.pinv(X1))
        aggregated_y1 = self.out_activation((B.dot(self._esn1._WOut, X1)).T)
        # NOTE(review): this training_error1 is overwritten a few lines
        # below by an equivalent-looking computation without the
        # reshape — confirm which form is intended.
        training_error1 = B.sqrt(
            B.mean((aggregated_y1.reshape(
                (total_length, self._n_output1)) - outputData1)**2))
        training_error2 = 0

        training_error1 = B.sqrt(B.mean((aggregated_y1 - outputData1)**2))
        training_error2 = B.sqrt(B.mean((aggregated_y2 - outputData2)**2))

        return training_error1, training_error2
Example no. 8
0
def loss(prediction, target):
    """Mean squared error between ``prediction`` and ``target``."""
    squared_error = (prediction - target) ** 2
    return B.mean(squared_error)