def __init__(
        self,
        input_size: int,
        bias=True,
        initializer: SubreservoirWeightInitializer = SubreservoirWeightInitializer(),
        num_layers: int = 1,
        activation: Activation = A.tanh()):
    """Deep stack of subreservoir cells.

    The first layer consumes the raw input (``input_cell=True``); every
    subsequent layer consumes the previous layer's state, whose width is
    ``initializer.subreservoir_size``.

    :param input_size: dimensionality of the external input fed to layer 0
    :param bias: whether cells carry a bias term
    :param initializer: supplies weights and the per-layer ``subreservoir_size``
    :param num_layers: number of stacked subreservoir cells (layer 0 plus
        ``num_layers - 1`` hidden-to-hidden layers)
    :param activation: nonlinearity shared by all layers
    """
    # NOTE(review): positional base-class call — ``None`` sits where the base
    # takes ``hidden_size`` and ``0`` where it takes ``num_chunks``; the final
    # positional ``activation`` lands on whatever parameter follows ``num_chunks``
    # in the base signature (sibling SubreservoirCell passes activation by
    # keyword). Confirm against the base class — TODO verify argument order.
    super().__init__(input_size, None, bias, initializer, 0, activation)
    # Layer 0 reads the external input; layers 1..num_layers-1 read the
    # previous subreservoir's state.
    self.layers = [SubreservoirCell(input_size, bias, initializer, activation, input_cell=True)] + \
        [SubreservoirCell(initializer.subreservoir_size, bias, initializer, activation)
         for _ in range(1, num_layers)]
    # Per-layer state width (NOT the total width across all layers).
    self.subreservoir_size = initializer.subreservoir_size
    self.hidden_size = initializer.subreservoir_size
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
             initializer: WeightInitializer = WeightInitializer(),
             activation: Activation = A.tanh(), num_chunks: int = 1,
             requires_grad: bool = False):
    """Single echo-state reservoir cell.

    :param input_size: dimensionality of the input vector
    :param hidden_size: number of reservoir units
    :param bias: whether the cell carries a bias term
    :param initializer: produces the input and recurrent weight matrices
    :param activation: nonlinearity applied to the pre-activation state
    :param num_chunks: forwarded to the base class
    :param requires_grad: whether reservoir weights participate in autograd
    """
    super(ESNCell, self).__init__(input_size, hidden_size, bias,
                                  initializer=initializer,
                                  num_chunks=num_chunks,
                                  requires_grad=requires_grad)
    # Hidden state starts empty; it is created lazily on the first forward pass.
    self.hx = None
    self.activation = activation
    self.requires_grad = requires_grad
def __init__(self, input_size: int, hidden_size: int, output_dim: int = 1,
             bias: bool = True, initializer: WeightInitializer = None,
             num_layers=1, activation: Activation = A.tanh(),
             transient: int = 30, reglarization: float = 1.):
    """Deep ESN: a stacked reservoir with an SVD ridge-regression readout.

    :param input_size: dimensionality of the input vector
    :param hidden_size: reservoir units per layer
    :param output_dim: dimensionality of the readout output
    :param bias: whether reservoir cells carry a bias term
    :param initializer: weight initializer; ``None`` means a default
        ``WeightInitializer()``
    :param num_layers: number of stacked reservoir layers
    :param activation: nonlinearity shared by all layers
    :param transient: number of initial washout steps ignored by the readout
    :param reglarization: ridge regularization strength
        (TODO: typo for 'regularization', cf. DeepSubreservoirESN; kept for
        backward compatibility with keyword callers)
    """
    # Fix: previously a None default was forwarded verbatim to DeepESNCell,
    # overriding that class's own WeightInitializer() default and breaking
    # weight creation. Construct the default here instead.
    if initializer is None:
        initializer = WeightInitializer()
    # Readout sees the concatenated states of all layers, hence
    # hidden_size * num_layers input features.
    super().__init__(
        reservoir=DeepESNCell(input_size, hidden_size, bias, initializer,
                              num_layers, activation),
        readout=SVDReadout(hidden_size * num_layers, output_dim,
                           regularization=reglarization),
        transient=transient)
def __init__(self, input_size: int, bias: bool = True,
             initializer: SubreservoirWeightInitializer = SubreservoirWeightInitializer(),
             activation: Activation = A.tanh(), requires_grad: bool = False,
             input_cell=False):
    """One subreservoir layer; its width comes from the initializer.

    :param input_size: dimensionality of this layer's input
    :param bias: whether the cell carries a bias term
    :param initializer: supplies weights and fixes ``subreservoir_size``
    :param activation: nonlinearity applied to the state
    :param requires_grad: whether weights participate in autograd
    :param input_cell: marks the layer that reads the raw network input
    """
    # The hidden width is dictated by the initializer, not passed explicitly.
    width = initializer.subreservoir_size
    super(SubreservoirCell, self).__init__(input_size, width, bias,
                                           initializer=initializer,
                                           num_chunks=1,
                                           activation=activation,
                                           requires_grad=requires_grad)
    self.input_cell = input_cell
def __init__(self, input_size: int, hidden_size: int, bias=True,
             initializer: WeightInitializer = WeightInitializer(),
             num_layers: int = 1, activation: Activation = A.tanh()):
    """Stack of ``num_layers`` ESNCell reservoirs.

    Layer 0 maps ``input_size -> hidden_size``; every following layer maps
    ``hidden_size -> hidden_size``.

    :param input_size: dimensionality of the external input
    :param hidden_size: reservoir units per layer
    :param bias: whether cells carry a bias term
    :param initializer: produces weights for every layer
    :param num_layers: number of stacked cells
    :param activation: nonlinearity shared by all layers
    """
    super().__init__()
    # Fix: previously self.layers was never assigned when num_layers <= 0,
    # leaving the instance half-built and raising AttributeError later.
    self.layers = []
    if num_layers > 0:
        self.layers = [ESNCell(input_size, hidden_size, bias, initializer, activation)] \
            + [ESNCell(hidden_size, hidden_size, bias, initializer, activation)
               for _ in range(1, num_layers)]
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.bias = bias
    self.initializer = initializer
    # Fix: the original assigned self.activation twice; keep one assignment.
    self.activation = activation
def __init__(self, input_size: int, output_dim: int = 1, bias: bool = True,
             initializer: SubreservoirWeightInitializer = SubreservoirWeightInitializer(),
             num_layers=1, activation: Activation = A.tanh(),
             transient: int = 30, regularization: float = 1.):
    """Deep subreservoir ESN with an SVD ridge-regression readout.

    :param input_size: dimensionality of the input vector
    :param output_dim: dimensionality of the readout output
    :param bias: whether reservoir cells carry a bias term
    :param initializer: supplies weights and the per-layer subreservoir size
    :param num_layers: number of stacked subreservoir layers
    :param activation: nonlinearity shared by all layers
    :param transient: number of initial washout steps ignored by the readout
    :param regularization: ridge regularization strength for the readout
    """
    reservoir = DeepSubreservoirCell(input_size, bias, initializer,
                                     num_layers, activation)
    # The readout consumes the concatenated states of all layers.
    readout = SVDReadout(initializer.subreservoir_size * num_layers,
                         output_dim, regularization=regularization)
    super().__init__(reservoir=reservoir, readout=readout, transient=transient)
    self.output_dim = output_dim
    self.regularization = regularization
    self.hidden_size = self.reservoir.hidden_size
# Removed: stale commented-out experiment code (esn.fit / manual plotting /
# reset_hidden / grow) — dead code kept no information the live script lacks.
# NOTE(review): fit_transform_DSESN is called below, but its import was
# commented out in the original ("from esn.util import fit_transform_DSESN") —
# confirm the name is imported elsewhere in this module.

# Build a 3-layer deep subreservoir ESN with 20 units per subreservoir and a
# leaky-tanh activation; the first 60 steps are treated as washout.
esn = DeepSubreservoirESN(
    1, 1,
    initializer=SubreservoirWeightInitializer(subreservoir_size=20),
    num_layers=3,
    bias=False,
    activation=A.tanh(leaky_rate=0.6),
    transient=60)

metric = torch.nn.MSELoss()
# Fit on (X, y) and predict the held-out series in one call.
trained_esn, output = fit_transform_DSESN(esn, X, y, X_test, y_test, metric,
                                          verbose=1)

# Prediction (cyan) against ground truth (blue) over the 378-step test window.
plt.plot(range(378), output.view(-1).detach().numpy(), 'c')
plt.plot(range(378), y_test.view(-1).detach().numpy(), 'b')
plt.show()