Example #1
    def regular_graph(degree, seed=1):
        i2 = CompositeInitializer().with_seed(seed).uniform()
        i = CompositeInitializer() \
            .with_seed(seed) \
            .uniform() \
            .regular_graph(degree) \
            .spectral_normalize() \
            .scale(1.0)

        w = WeightInitializer()
        w.weight_hh_init = i
        w.weight_ih_init = i2
        return w
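The factory above only builds the WeightInitializer; it does not touch a network by itself. A minimal usage sketch, assuming the ESNCell constructor shown in Example #6 (sizes and seed are only illustrative):

    # reservoir matrix drawn as a regular graph of degree 4, reproducible via the seed
    initializer = regular_graph(degree=4, seed=42)

    # the cell fills weight_ih / weight_hh through init_parameters (see Example #2)
    cell = ESNCell(input_size=1, hidden_size=500, bias=False, initializer=initializer)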
Example #2
    def init_parameters(self, initializer: WeightInitializer):
        self.weight_ih = nn.Parameter(
            data=initializer.init_weight_ih(
                weight=torch.Tensor(self.hidden_size, self.input_size)),
            requires_grad=self.requires_grad)

        self.weight_hh = nn.Parameter(
            data=initializer.init_weight_hh(
                weight=torch.Tensor(self.hidden_size, self.hidden_size)),
            requires_grad=self.requires_grad)

        if self.bias:
            self.bias_ih = nn.Parameter(
                data=initializer.init_bias_ih(
                    bias=torch.Tensor(self.hidden_size)),
                requires_grad=self.requires_grad)
            self.bias_hh = nn.Parameter(
                data=initializer.init_bias_hh(
                    bias=torch.Tensor(self.hidden_size)),
                requires_grad=self.requires_grad)
Example #3
    def watts_strogatz_graph(neighbours, rewire_proba, seed=1):
        input_weights = CompositeInitializer().with_seed(seed).uniform()
        hidden_weight = CompositeInitializer() \
            .with_seed(seed) \
            .uniform() \
            .watts_strogatz(neighbours=neighbours, rewire_proba=rewire_proba) \
            .spectral_normalize() \
            .scale(1.0)

        return WeightInitializer(
            weight_hh_init=hidden_weight,
            weight_ih_init=input_weights
        )
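As with Example #1, the resulting factory is handed to a network through the initializer argument; a short sketch, mirroring the usage in Example #13 (the concrete sizes are only illustrative):

    # small-world reservoir: 4 neighbours per node, 20% of the edges rewired
    w = watts_strogatz_graph(neighbours=4, rewire_proba=0.2, seed=42)
    esn = DeepESN(initializer=w, hidden_size=500, num_layers=2)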
Example #4
 def __init__(self,
              readout,
              input_size: int = 1,
              hidden_size: int = 500,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              num_layers=2,
              activation: Activation = A.self_normalizing_default(),
              washout: int = 30):
     super().__init__(reservoir=MultiTimeSeriesHandler(
         DeepESNCell(input_size, hidden_size, bias, initializer, num_layers,
                     activation)),
                      readout=readout,
                      washout=washout)
Example #5
 def __init__(self,
              input_size: int,
              hidden_size: int,
              bias: bool,
              initializer: WeightInitializer = WeightInitializer(),
              num_chunks: int = 1,
              requires_grad: bool = False,
              init: bool = True):
     super(ESNCellBase, self).__init__()
     self.input_size = input_size
     self.hidden_size = hidden_size
     self.requires_grad = requires_grad
     self.bias = bias
     self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh = None, None, None, None
     if init:
         self.init_parameters(initializer)
Example #6
 def __init__(self,
              input_size: int,
              hidden_size: int,
              bias: bool = True,
              initializer: WeightInitializer = WeightInitializer(),
              activation: Activation = A.tanh(),
              num_chunks: int = 1,
              requires_grad: bool = False):
     super(ESNCell, self).__init__(input_size,
                                   hidden_size,
                                   bias,
                                   initializer=initializer,
                                   num_chunks=num_chunks,
                                   requires_grad=requires_grad)
     self.requires_grad = requires_grad
     self.activation = activation
     self.hx = None
Example #7
 def __init__(self,
              input_size: int = 1,
              hidden_size: int = 250,
              output_dim: int = 1,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              groups=4,
              activation: Activation = A.self_normalizing_default(),
              washout: int = 30,
              regularization: float = 1.):
     super().__init__(reservoir=MultiTimeSeriesHandler(
         GroupOfESNCell(input_size, hidden_size, groups, activation, bias,
                        initializer)),
                      readout=SVDReadout(hidden_size * groups,
                                         output_dim,
                                         regularization=regularization),
                      washout=washout)
Example #8
 def __init__(self,
              input_size: int = 1,
              hidden_size: int = 500,
              output_dim: int = 1,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              num_layers=2,
              activation=A.self_normalizing_default(),
              washout: int = 30,
              regularization: float = 1.):
     super().__init__(reservoir=MultiTimeSeriesHandler(
         DeepESNCell(input_size, hidden_size, bias, initializer, num_layers,
                     activation)),
                      readout=SVDReadout(hidden_size * num_layers,
                                         output_dim,
                                         regularization=regularization),
                      washout=washout)
Example #9
def regular_graph_initializer(seed, degree):
    # initialize the input weights from a uniform distribution on [-1, 1], with a fixed seed so results are reproducible
    input_weight = CompositeInitializer().with_seed(seed).uniform()

    # the specified operations are applied one by one, so this "builder" can be seen as a list of transforms:
    # first set the seed and start from a uniform distribution,
    # then treat the newly created dense matrix as an adjacency matrix and turn it into a regular graph of the
    # desired degree, then apply spectral normalization so that the spectral radius is 1,
    # and finally scale the matrix by a factor of 0.9 to complete the initialization
    reservoir_weight = CompositeInitializer() \
        .with_seed(seed) \
        .uniform() \
        .regular_graph(degree) \
        .spectral_normalize() \
        .scale(0.9)

    return WeightInitializer(weight_ih_init=input_weight,
                             weight_hh_init=reservoir_weight)
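The spectral_normalize / scale(0.9) pair is what controls the effective spectral radius of the reservoir: dividing the matrix by its largest absolute eigenvalue brings the spectral radius to 1, and the final scaling sets it to 0.9. A rough hand-rolled equivalent (a sketch of the idea, not the library's implementation):

    import torch

    W = torch.empty(500, 500).uniform_(-1, 1)     # dense uniform matrix
    rho = torch.linalg.eigvals(W).abs().max()     # spectral radius of W
    W = 0.9 * W / rho                             # effective spectral radius ~0.9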
Example #10
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 bias=False,
                 initializer: WeightInitializer = WeightInitializer(),
                 num_layers: int = 1,
                 activation: Activation = 'default'):
        super().__init__()
        if type(activation) != list:
            activation = [activation] * num_layers

        self.layers = [
            ESNCell(input_size, hidden_size, bias, initializer, activation[0])
        ]
        if num_layers > 1:
            self.layers += [
                ESNCell(hidden_size, hidden_size, bias, initializer,
                        activation[i]) for i in range(1, num_layers)
            ]
        self.gpu_enabled = False
Example #11
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 groups,
                 activation=A.self_normalizing_default(),
                 bias: bool = False,
                 initializer: WeightInitializer = WeightInitializer()):
        super(GroupOfESNCell, self).__init__()
        num_groups = groups if type(groups) == int else len(groups)
        if type(activation) != list:
            activation = [activation] * num_groups
        if type(groups) != int:
            self.groups = groups
        else:
            self.groups = [
                ESNCell(input_size, hidden_size, bias, initializer,
                        activation[i]) for i in range(groups)
            ]

        self.hidden_size = hidden_size
        self.gpu_enabled = False
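The groups argument is therefore overloaded: an int makes the cell build that many independent ESNCell reservoirs itself, while a list is taken as ready-made cells (Example #12 passes a list of DeepESNCell stacks). A short sketch of both call styles, with illustrative sizes:

    # groups as an int: four ESNCell reservoirs are created internally
    group = GroupOfESNCell(input_size=1, hidden_size=250, groups=4)

    # groups as a list of pre-built cells, as in Example #12
    cells = [DeepESNCell(1, 250, False, WeightInitializer(), 2, A.self_normalizing_default())
             for _ in range(2)]
    group = GroupOfESNCell(input_size=1, hidden_size=250, groups=cells)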
Example #12
 def __init__(self,
              input_size: int = 1,
              hidden_size: int = 250,
              output_dim: int = 1,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              groups=2,
              num_layers=(2, 2),
              activation: Activation = A.self_normalizing_default(),
              washout: int = 30,
              regularization: float = 1.,
              network_size=None):
     hidden_size = hidden_size if network_size is None else network_size // sum(
         num_layers)
     super().__init__(reservoir=MultiTimeSeriesHandler(
         GroupOfESNCell(input_size, hidden_size, [
             DeepESNCell(input_size, hidden_size, bias, initializer, layers,
                         activation) for layers in num_layers
         ], activation, bias, initializer)),
                      readout=SVDReadout(hidden_size * groups,
                                         output_dim,
                                         regularization=regularization),
                      washout=washout)
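The network_size shortcut simply spreads a total neuron budget evenly over all layers. For example, with network_size=1000 and num_layers=(2, 2), hidden_size becomes 1000 // sum((2, 2)) = 250, so the four layers together hold 250 * 4 = 1000 reservoir neurons, matching the requested budget regardless of how the layers are split between groups.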
Example #13
sunspot = dl.loader_explicit(Sunspot, test_size=600)
nrmse = NRMSELoss()
if norm:
    X, X_test, y, y_test, centr, spread = dl.norm_loader__(sunspot)
    y_test = spread * y_test + centr
else:
    X, X_test, y, y_test = sunspot()

i = CompositeInitializer()\
    .with_seed(12)\
    .uniform()\
    .regular_graph(4)\
    .spectral_normalize()\
    .scale(0.9)

w = WeightInitializer()
w.weight_hh_init = i

esn = DeepESN(initializer=w, hidden_size=500, num_layers=2)
start = time.time()
# esn.to_cuda()
esn.fit(X, y)

if norm:
    output = spread * esn(X_test) + centr
else:
    output = esn(X_test)
print(time.time() - start)
n = nrmse(output, y_test).item()
print(n)
last = 50