def _fitProcess(self, data):
    try:
        inData, outData, indices, state = data

        transientTime = self.sharedNamespace.transientTime
        partialLength = self.sharedNamespace.partialLength
        totalLength = self.sharedNamespace.totalLength
        timeseriesCount = self.sharedNamespace.timeseriesCount

        workerID = self.parallelWorkerIDs.get()
        self._x[workerID] = state

        # propagate
        X = B.empty((1 + self.n_input + self.n_reservoir, totalLength))
        for i in range(timeseriesCount):
            X[:, i * partialLength:(i + 1) * partialLength] = self.propagate(
                inData[i], transientTime=transientTime,
                x=self._x[workerID], verbose=0)

        # define the target values
        Y_target = B.empty((1, totalLength))
        for i in range(timeseriesCount):
            Y_target[:, i * partialLength:(i + 1) * partialLength] = \
                self.out_inverse_activation(outData[i]).T[:, transientTime:]

        # now fit
        WOut = None
        if self._solver == "pinv":
            WOut = B.dot(Y_target, B.pinv(X))
        elif self._solver == "lsqr":
            X_T = X.T
            WOut = B.dot(
                B.dot(Y_target, X_T),
                B.inv(
                    B.dot(X, X_T) + self._regressionParameters[0] *
                    B.identity(1 + self.n_input + self.n_reservoir)))

        # calculate the training prediction now
        # trainingPrediction = self.out_activation(B.dot(WOut, X).T)

        # store the state and the output matrix of the worker
        SpatioTemporalESN._fitProcess.fitQueue.put(
            ([x - self._filterWidth for x in indices],
             self._x[workerID].copy(), WOut.copy()))

        self.parallelWorkerIDs.put(workerID)
    except Exception as ex:
        print(ex)
        import traceback
        traceback.print_exc()

        SpatioTemporalESN._fitProcess.fitQueue.put(
            ([x - self._filterWidth for x in indices], None, None))
        self.parallelWorkerIDs.put(workerID)
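# ---------------------------------------------------------------------------
# Sketch (not part of the library): the "lsqr" branch above is closed-form
# ridge (Tikhonov) regression, W_out = Y X^T (X X^T + lambda*I)^-1, written
# with the backend shorthand B. A minimal plain-numpy version for reference;
# `ridge_readout` and the toy data are illustrative assumptions.
import numpy as np

def ridge_readout(X, Y, regression_parameter):
    # X: design matrix (features, time), here features = 1 + n_input + n_reservoir
    # Y: targets (outputs, time)
    n_features = X.shape[0]
    return Y @ X.T @ np.linalg.inv(
        X @ X.T + regression_parameter * np.identity(n_features))

# toy check: a known linear readout is recovered from noiseless data
_rng = np.random.default_rng(0)
_X = _rng.standard_normal((5, 200))
_W_true = _rng.standard_normal((1, 5))
_W_est = ridge_readout(_X, _W_true @ _X, regression_parameter=1e-8)
assert np.allclose(_W_true, _W_est, atol=1e-4)
# ---------------------------------------------------------------------------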
def create_random_rotation_matrix(self):
    h = rnd.randint(low=0, high=self.n_reservoir)
    k = rnd.randint(low=0, high=self.n_reservoir)
    # ensure two distinct axes; with h == k the assignments below would
    # overwrite each other and Q would no longer be a rotation
    while k == h:
        k = rnd.randint(low=0, high=self.n_reservoir)

    phi = rnd.rand(1) * 2 * np.pi

    Q = B.identity(self.n_reservoir)
    Q[h, h] = np.cos(phi)
    Q[k, k] = np.cos(phi)
    Q[h, k] = -np.sin(phi)
    Q[k, h] = np.sin(phi)

    return Q
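# ---------------------------------------------------------------------------
# Sketch (not part of the library): the matrix returned above is a Givens
# rotation in the (h, k) plane. Such matrices are orthogonal, so repeatedly
# multiplying them onto W (as the SORM branch below does) keeps every
# eigenvalue on the unit circle. A quick plain-numpy check:
import numpy as np

_rng = np.random.default_rng(42)
_n = 6
_h, _k = _rng.choice(_n, size=2, replace=False)
_phi = _rng.random() * 2 * np.pi
_Q = np.identity(_n)
_Q[_h, _h] = _Q[_k, _k] = np.cos(_phi)
_Q[_h, _k], _Q[_k, _h] = -np.sin(_phi), np.sin(_phi)

assert np.allclose(_Q @ _Q.T, np.identity(_n))          # orthogonality
assert np.allclose(np.abs(np.linalg.eigvals(_Q)), 1.0)  # unit-circle spectrum
# ---------------------------------------------------------------------------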
def fit(
        self,
        inputData,
        outputData,
        transientTime="AutoReduce",
        transientTimeCalculationEpsilon=1e-3,
        transientTimeCalculationLength=20,
        verbose=0,
):
    # check the input data
    if inputData.shape[0] != outputData.shape[0]:
        raise ValueError(
            "Amount of input and output datasets is not equal - {0} != {1}"
            .format(inputData.shape[0], outputData.shape[0]))

    nSequences = inputData.shape[0]
    trainingLength = inputData.shape[1]

    self._x = B.zeros((self.n_reservoir, 1))

    # automatic transient time calculation
    if transientTime == "Auto":
        transientTime = self.calculateTransientTime(
            inputData,
            outputData,
            transientTimeCalculationEpsilon,
            transientTimeCalculationLength,
        )
    if transientTime == "AutoReduce":
        if (inputData is None and outputData.shape[1] == 1) or (
                inputData is not None and inputData.shape[1] == 1):
            transientTime = self.calculateTransientTime(
                inputData,
                outputData,
                transientTimeCalculationEpsilon,
                transientTimeCalculationLength,
            )
            transientTime = self.reduceTransientTime(
                inputData, outputData, transientTime)
        else:
            print(
                "Transient time reduction is supported only for 1 dimensional input."
            )

    self._X = B.zeros((
        1 + self.n_input + self.n_reservoir,
        nSequences * (trainingLength - transientTime),
    ))
    Y_target = B.zeros(
        (self.n_output, (trainingLength - transientTime) * nSequences))

    if verbose > 0:
        bar = progressbar.ProgressBar(max_value=len(inputData),
                                      redirect_stdout=True,
                                      poll_interval=0.0001)
        bar.update(0)

    for n in range(len(inputData)):
        self._x = B.zeros((self.n_reservoir, 1))
        self._X[:, n * (trainingLength - transientTime):(n + 1) *
                (trainingLength - transientTime)] = self.propagate(
                    inputData[n], transientTime=transientTime, verbose=0)
        # set the target values
        Y_target[:, n * (trainingLength - transientTime):(n + 1) *
                 (trainingLength - transientTime)] = np.tile(
                     self.out_inverse_activation(outputData[n]),
                     trainingLength - transientTime,
                 ).T
        if verbose > 0:
            bar.update(n)

    if verbose > 0:
        bar.finish()

    if self._solver == "pinv":
        self._WOut = B.dot(Y_target, B.pinv(self._X))

        # calculate the training prediction now
        train_prediction = self.out_activation((B.dot(self._WOut,
                                                      self._X)).T)

    elif self._solver == "lsqr":
        X_T = self._X.T
        self._WOut = B.dot(
            B.dot(Y_target, X_T),
            B.inv(
                B.dot(self._X, X_T) + self._regressionParameters[0] *
                B.identity(1 + self.n_input + self.n_reservoir)),
        )
        """
        # alternative representation of the equation

        Xt = X.T

        A = np.dot(X, Y_target.T)

        B = np.linalg.inv(np.dot(X, Xt) + regression_parameter*np.identity(1+self.n_input+self.n_reservoir))

        self._WOut = np.dot(B, A)
        self._WOut = self._WOut.T
        """

        # calculate the training prediction now
        train_prediction = self.out_activation(B.dot(self._WOut, self._X).T)

    elif self._solver in [
            "sklearn_auto",
            "sklearn_lsqr",
            "sklearn_sag",
            "sklearn_svd",
    ]:
        mode = self._solver[8:]
        params = self._regressionParameters
        params["solver"] = mode
        self._ridgeSolver = Ridge(**params)

        self._ridgeSolver.fit(self._X.T, Y_target.T)

        # calculate the training prediction now
        train_prediction = self.out_activation(
            self._ridgeSolver.predict(self._X.T))

    elif self._solver in ["sklearn_svr", "sklearn_svc"]:
        self._ridgeSolver = SVR(**self._regressionParameters)

        self._ridgeSolver.fit(self._X.T, Y_target.T.flatten())

        # calculate the training prediction now
        train_prediction = self.out_activation(
            self._ridgeSolver.predict(self._X.T))

    train_prediction = np.mean(train_prediction, 0)

    # calculate the training error now
    training_error = B.sqrt(B.mean((train_prediction - outputData.T)**2))
    return training_error
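# ---------------------------------------------------------------------------
# Sketch (not part of the library): the np.tile call in fit() above repeats
# one label per sequence across every non-transient time step, i.e. this
# variant trains a sequence-to-value readout. Toy numbers, assuming a scalar
# label and n_output == 1:
import numpy as np

_trainingLength, _transientTime = 10, 3
_label = 0.7  # one target for the whole sequence
_Y_block = np.tile(_label, _trainingLength - _transientTime)
# _Y_block is [0.7 0.7 0.7 0.7 0.7 0.7 0.7]; a (7,) row broadcasts into the
# (1, trainingLength - transientTime) slice of Y_target
assert _Y_block.shape == (7,)
# ---------------------------------------------------------------------------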
def _createReservoir(self, weightGeneration, feedback=False, verbose=False):
    # naive generation of the matrix W by using random weights
    if weightGeneration == 'naive':
        # random weight matrix from -0.5 to 0.5
        self._W = B.array(rnd.rand(self.n_reservoir, self.n_reservoir) - 0.5)

        # set sparseness% to zero
        mask = rnd.rand(self.n_reservoir, self.n_reservoir) > self._reservoirDensity
        self._W[mask] = 0.0

        _W_eigenvalues = B.abs(B.eigenval(self._W)[0])
        self._W *= self._spectralRadius / B.max(_W_eigenvalues)

    # generation using the SORM technique (see http://ftp.math.uni-rostock.de/pub/preprint/2012/pre12_01.pdf)
    elif weightGeneration == "SORM":
        self._W = B.identity(self.n_reservoir)

        number_nonzero_elements = self._reservoirDensity * self.n_reservoir * self.n_reservoir
        i = 0

        while np.count_nonzero(self._W) < number_nonzero_elements:
            i += 1
            Q = self.create_random_rotation_matrix()
            self._W = Q.dot(self._W)

        self._W *= self._spectralRadius

    # generation using the method proposed by Yildiz
    elif weightGeneration == 'advanced':
        # to create W, we follow these steps:
        # at first, create W = |W|
        # make it sparse
        # then scale its spectral radius to rho(W) = 1 (according to Yildiz with x(n+1) = (1-a)*x(n)+a*f(...))
        # then flip the signs of the matrix entries randomly

        # random weight matrix from 0 to 0.5
        self._W = B.array(rnd.rand(self.n_reservoir, self.n_reservoir) / 2)

        # set sparseness% to zero
        mask = B.rand(self.n_reservoir, self.n_reservoir) > self._reservoirDensity
        self._W[mask] = 0.0

        from scipy.sparse.linalg import ArpackNoConvergence
        # calculate only the largest eigenvalue
        try:
            # ARPACK gives a fast approximation, but it may fail to converge
            _W_eigenvalue = B.max(np.abs(sp.sparse.linalg.eigs(self._W, k=1)[0]))
        except ArpackNoConvergence:
            # safe fallback: compute the full spectrum
            _W_eigenvalue = B.max(B.abs(sp.linalg.eigvals(self._W)))
        # _W_eigenvalue = B.max(B.abs(np.linalg.eig(self._W)[0]))

        self._W *= self._spectralRadius / _W_eigenvalue

        if verbose:
            M = self._leakingRate * self._W + (1 - self._leakingRate) * np.identity(n=self._W.shape[0])
            M_eigenvalue = B.max(B.abs(B.eigenval(M)[0]))  # np.max(np.abs(sp.sparse.linalg.eigs(M, k=1)[0]))
            print("eff. spectral radius: {0}".format(M_eigenvalue))

        # flip the signs randomly (randint(1, 3) yields 1 or 2, so the powers of -1 are -1 or 1)
        random_signs = B.power(-1, rnd.randint(1, 3, (self.n_reservoir, self.n_reservoir)))
        self._W = B.multiply(self._W, random_signs)

    elif weightGeneration == 'custom':
        pass

    else:
        raise ValueError("The weightGeneration property must be one of the following values: naive, advanced, SORM, custom")

    # check if the user is really using one of the internal methods, or wants to create W on their own
    if weightGeneration != 'custom':
        self._createInputMatrix()

    # create the optional feedback matrix
    if feedback:
        self._WFeedback = B.rand(self.n_reservoir, 1 + self.n_output) - 0.5
        self._WFeedback *= self._feedbackScaling
    else:
        self._WFeedback = None
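# ---------------------------------------------------------------------------
# Sketch (not part of the library): the SORM branch composes random plane
# rotations onto the identity until the requested density is reached; the
# product stays orthogonal, so its spectral radius is exactly 1 before the
# final scaling. A self-contained plain-numpy version (`sorm_reservoir` is an
# illustrative name, not the library API):
import numpy as np

def sorm_reservoir(n, density, spectral_radius, rng=None):
    if rng is None:
        rng = np.random.default_rng()
    W = np.identity(n)
    while np.count_nonzero(W) < density * n * n:
        h, k = rng.choice(n, size=2, replace=False)
        phi = rng.random() * 2 * np.pi
        Q = np.identity(n)
        Q[h, h] = Q[k, k] = np.cos(phi)
        Q[h, k], Q[k, h] = -np.sin(phi), np.sin(phi)
        W = Q @ W  # orthogonal times orthogonal stays orthogonal
    return spectral_radius * W

_W_sorm = sorm_reservoir(50, density=0.2, spectral_radius=0.9)
# every eigenvalue of the orthogonal product has modulus 1, so after scaling
# the spectral radius is exactly 0.9
assert np.allclose(np.max(np.abs(np.linalg.eigvals(_W_sorm))), 0.9)
# ---------------------------------------------------------------------------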
def _createReservoir(self, weightGeneration, feedback=False, verbose=False):
    # naive generation of the matrix W by using random weights
    if weightGeneration == "naive":
        # random weight matrix from -0.5 to 0.5
        self._W = B.array(B.rand(self.n_reservoir, self.n_reservoir) - 0.5)

        # set sparseness% to zero
        mask = B.rand(self.n_reservoir, self.n_reservoir) > self._reservoirDensity
        self._W[mask] = 0.0

        _W_eigenvalues = B.abs(B.eigenval(self._W)[0])
        self._W *= self._spectralRadius / B.max(_W_eigenvalues)

    # generation using the SORM technique (see http://ftp.math.uni-rostock.de/pub/preprint/2012/pre12_01.pdf)
    elif weightGeneration == "SORM":
        self._W = B.identity(self.n_reservoir)

        number_nonzero_elements = (self._reservoirDensity *
                                   self.n_reservoir * self.n_reservoir)
        i = 0

        while B.count_nonzero(self._W) < number_nonzero_elements:
            i += 1
            Q = self.create_random_rotation_matrix()
            self._W = Q.dot(self._W)
        self._W *= self._spectralRadius

    # generation using the method proposed by Yildiz
    elif weightGeneration == "advanced":
        # to create W, we follow these steps:
        # at first, create W = |W|
        # make it sparse
        # then scale its spectral radius to rho(W) = 1 (according to Yildiz with x(n+1) = (1-a)*x(n)+a*f(...))
        # then flip the signs of the matrix entries randomly

        # random weight matrix from 0 to 0.5
        self._W = B.array(B.rand(self.n_reservoir, self.n_reservoir) / 2)

        # set sparseness% to zero
        mask = B.rand(self.n_reservoir, self.n_reservoir) > self._reservoirDensity
        self._W[mask] = 0.0

        _W_eigenvalue = B.max(B.abs(B.eigvals(self._W)))

        self._W *= self._spectralRadius / _W_eigenvalue

        if verbose:
            M = self._leakingRate * self._W + (
                1 - self._leakingRate) * B.identity(n=self._W.shape[0])
            M_eigenvalue = B.max(B.abs(B.eigenval(M)[0]))
            print("eff. spectral radius: {0}".format(M_eigenvalue))

        # flip the signs randomly (randint(1, 3) yields 1 or 2, so the powers of -1 are -1 or 1)
        random_signs = B.power(-1, B.randint(1, 3, (self.n_reservoir, )))
        self._W = B.multiply(self._W, random_signs)

    elif weightGeneration == "custom":
        pass

    else:
        raise ValueError(
            "The weightGeneration property must be one of the following values: naive, advanced, SORM, custom"
        )

    # check if the user is really using one of the internal methods, or wants to create W on their own
    if weightGeneration != "custom":
        self._createInputMatrix()

    # create the optional feedback matrix
    if feedback:
        self._WFeedback = B.rand(self.n_reservoir, 1 + self.n_output) - 0.5
        self._WFeedback *= self._feedbackScaling
    else:
        self._WFeedback = None
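# ---------------------------------------------------------------------------
# Sketch (not part of the library): the "naive" branch uses the standard echo
# state scaling trick - divide W by its largest eigenvalue magnitude, then
# multiply by the desired spectral radius. Plain numpy, arbitrary sizes:
import numpy as np

_rng = np.random.default_rng(1)
_n, _density, _rho = 100, 0.2, 0.9

_W = _rng.random((_n, _n)) - 0.5             # weights in [-0.5, 0.5)
_W[_rng.random((_n, _n)) > _density] = 0.0   # sparsify to the requested density
_W *= _rho / np.max(np.abs(np.linalg.eigvals(_W)))

assert np.isclose(np.max(np.abs(np.linalg.eigvals(_W))), _rho)
# ---------------------------------------------------------------------------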
def fit(
        self,
        inputData,
        outputData,
        transientTime="AutoReduce",
        transientTimeCalculationEpsilon=1e-3,
        transientTimeCalculationLength=20,
        verbose=0,
):
    # check the input data
    if self.n_input != 0:
        if len(inputData.shape) == 3 and len(outputData.shape) > 1:
            # multiple time series are used with a shape (timeseries, time, dimension) -> (timeseries, time, dimension)
            if inputData.shape[0] != outputData.shape[0]:
                raise ValueError(
                    "Amount of input and output datasets is not equal - {0} != {1}"
                    .format(inputData.shape[0], outputData.shape[0]))
            if inputData.shape[1] != outputData.shape[1]:
                raise ValueError(
                    "Amount of input and output time steps is not equal - {0} != {1}"
                    .format(inputData.shape[1], outputData.shape[1]))
        else:
            if inputData.shape[0] != outputData.shape[0]:
                raise ValueError(
                    "Amount of input and output time steps is not equal - {0} != {1}"
                    .format(inputData.shape[0], outputData.shape[0]))
    else:
        if inputData is not None:
            raise ValueError(
                "n_input has been set to zero. Therefore, no inputData may be given."
            )

    if inputData is not None:
        inputData = B.array(inputData)
    if outputData is not None:
        outputData = B.array(outputData)

    # reshape the input/output data to have the shape (timeseries, time, dimension)
    if len(outputData.shape) <= 2:
        outputData = outputData.reshape((1, -1, self.n_output))
    if inputData is not None:
        if len(inputData.shape) <= 2:
            inputData = inputData.reshape((1, -1, self.n_input))

    self.resetState()

    # automatic transient time calculation
    if transientTime == "Auto":
        transientTime = self.calculateTransientTime(
            inputData[0],
            outputData[0],
            transientTimeCalculationEpsilon,
            transientTimeCalculationLength,
        )
    if transientTime == "AutoReduce":
        if (inputData is None and outputData.shape[2] == 1) or (
                inputData is not None and inputData.shape[2] == 1):
            transientTime = self.calculateTransientTime(
                inputData[0],
                outputData[0],
                transientTimeCalculationEpsilon,
                transientTimeCalculationLength,
            )
            transientTime = self.reduceTransientTime(
                inputData[0], outputData[0], transientTime)
        else:
            print(
                "Transient time reduction is supported only for 1 dimensional input."
            )

    if inputData is not None:
        partialLength = inputData.shape[1] - transientTime
        totalLength = inputData.shape[0] * partialLength
        timeseriesCount = inputData.shape[0]
    elif outputData is not None:
        partialLength = outputData.shape[1] - transientTime
        totalLength = outputData.shape[0] * partialLength
        timeseriesCount = outputData.shape[0]
    else:
        raise ValueError("Either input or output data must not be None")

    self._X = B.empty((1 + self.n_input + self.n_reservoir, totalLength))

    if verbose > 0:
        bar = progressbar.ProgressBar(max_value=totalLength,
                                      redirect_stdout=True,
                                      poll_interval=0.0001)
        bar.update(0)

    for i in range(timeseriesCount):
        if inputData is not None:
            self._X[:, i * partialLength:(i + 1) * partialLength] = \
                self.propagate(inputData[i], outputData[i], transientTime,
                               verbose - 1)
        else:
            self._X[:, i * partialLength:(i + 1) * partialLength] = \
                self.propagate(None, outputData[i], transientTime,
                               verbose - 1)
        if verbose > 0:
            bar.update(i)

    if verbose > 0:
        bar.finish()

    # define the target values
    Y_target = B.empty((outputData.shape[2], totalLength))
    for i in range(timeseriesCount):
        Y_target[:, i * partialLength:(i + 1) * partialLength] = \
            self.out_inverse_activation(outputData[i]).T[:, transientTime:]

    if self._solver == "pinv":
        self._WOut = B.dot(Y_target, B.pinv(self._X))

        # calculate the training prediction now
        train_prediction = self.out_activation((B.dot(self._WOut,
                                                      self._X)).T)

    elif self._solver == "lsqr":
        X_T = self._X.T
        self._WOut = B.dot(
            B.dot(Y_target, X_T),
            B.inv(
                B.dot(self._X, X_T) + self._regressionParameters[0] *
                B.identity(1 + self.n_input + self.n_reservoir)),
        )
        """
        # alternative representation of the equation

        Xt = X.T

        A = np.dot(X, Y_target.T)

        B = np.linalg.inv(np.dot(X, Xt) + regression_parameter*np.identity(1+self.n_input+self.n_reservoir))

        self._WOut = np.dot(B, A)
        self._WOut = self._WOut.T
        """

        # calculate the training prediction now
        train_prediction = self.out_activation(B.dot(self._WOut, self._X).T)

    elif self._solver in [
            "sklearn_auto",
            "sklearn_lsqr",
            "sklearn_sag",
            "sklearn_svd",
    ]:
        mode = self._solver[8:]
        params = self._regressionParameters
        params["solver"] = mode
        self._ridgeSolver = Ridge(**params)

        self._ridgeSolver.fit(self._X.T, Y_target.T)

        # calculate the training prediction now
        train_prediction = self.out_activation(
            self._ridgeSolver.predict(self._X.T))

    elif self._solver in ["sklearn_svr", "sklearn_svc"]:
        self._ridgeSolver = SVR(**self._regressionParameters)

        self._ridgeSolver.fit(self._X.T, Y_target.T.flatten())

        # calculate the training prediction now
        train_prediction = self.out_activation(
            self._ridgeSolver.predict(self._X.T))

    # calculate the training error now
    # flatten the outputData
    outputData = outputData[:, transientTime:, :].reshape(totalLength, -1)
    training_error = B.sqrt(B.mean((train_prediction - outputData)**2))
    return training_error
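# ---------------------------------------------------------------------------
# Sketch (not part of the library): this fit() accepts either a single series
# of shape (time, dimension) or a batch of shape (timeseries, time, dimension);
# 2-D data is reshaped into a one-element batch. A hypothetical call below -
# the class name PredictionESN and its constructor arguments are assumptions
# for illustration:
import numpy as np

esn = PredictionESN(n_input=1, n_output=1, n_reservoir=200)

# batch of 5 one-dimensional series, 1000 steps each: learn one-step-ahead
# prediction of phase-shifted sine waves
_t = np.linspace(0, 20 * np.pi, 1001)
_series = np.sin(_t + np.random.rand(5, 1) * 2 * np.pi)[:, :, None]
_inputData, _outputData = _series[:, :-1, :], _series[:, 1:, :]

train_rmse = esn.fit(_inputData, _outputData, transientTime=100, verbose=0)
print("training RMSE:", train_rmse)
# ---------------------------------------------------------------------------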