Example #1
 def __init__(self,
              readout,
              input_size: int = 1,
              hidden_size: int = 500,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              num_layers=2,
              activation: Activation = A.self_normalizing_default(),
              washout: int = 30):
     super().__init__(reservoir=MultiTimeSeriesHandler(
         DeepESNCell(input_size, hidden_size, bias, initializer, num_layers,
                     activation)),
                      readout=readout,
                      washout=washout)
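
Example #1 leaves the readout to the caller. Going by Example #3, which pairs the same DeepESNCell stack with an SVDReadout of input dimension hidden_size * num_layers, a compatible readout for the defaults above might look like this sketch (one valid choice, not the only one):

# hedged sketch: 500 * 2 = hidden_size * num_layers from the defaults above
readout = SVDReadout(500 * 2, 1, regularization=1.)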
Example #2
 def __init__(self,
              input_size: int = 1,
              hidden_size: int = 250,
              output_dim: int = 1,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              groups=4,
              activation: Activation = A.self_normalizing_default(),
              washout: int = 30,
              regularization: float = 1.):
     super().__init__(reservoir=MultiTimeSeriesHandler(
         GroupOfESNCell(input_size, hidden_size, groups, activation, bias,
                        initializer)),
                      readout=SVDReadout(hidden_size * groups,
                                         output_dim,
                                         regularization=regularization),
                      washout=washout)
Example #3
 def __init__(self,
              input_size: int = 1,
              hidden_size: int = 500,
              output_dim: int = 1,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              num_layers=2,
              activation=A.self_normalizing_default(),
              washout: int = 30,
              regularization: float = 1.):
     super().__init__(reservoir=MultiTimeSeriesHandler(
         DeepESNCell(input_size, hidden_size, bias, initializer, num_layers,
                     activation)),
                      readout=SVDReadout(hidden_size * num_layers,
                                         output_dim,
                                         regularization=regularization),
                      washout=washout)
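
A minimal usage sketch; that this __init__ belongs to DeepESN is an assumption here, but Example #8 below constructs a DeepESN with the same keyword arguments:

# hedged sketch: keyword names taken from the signature above
esn = DeepESN(input_size=1, hidden_size=500, num_layers=2, washout=30)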
Example #4
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 groups,
                 activation=A.self_normalizing_default(),
                 bias: bool = False,
                 initializer: WeightInitializer = WeightInitializer()):
        super(GroupOfESNCell, self).__init__()
        num_groups = groups if isinstance(groups, int) else len(groups)
        if not isinstance(activation, list):
            activation = [activation] * num_groups
        if isinstance(groups, int):
            # build one ESNCell per group when only a count is given
            self.groups = [
                ESNCell(input_size, hidden_size, bias, initializer,
                        activation[i]) for i in range(groups)
            ]
        else:
            # a list of prebuilt cells is stored as-is
            self.groups = groups

        self.hidden_size = hidden_size
        self.gpu_enabled = False
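
The isinstance branch above means the cell accepts either a group count or a prebuilt list of cells. A sketch of both call styles, assuming the constructor signatures and the module aliases (A, WeightInitializer) used in the surrounding examples:

# groups as a count: uniform ESNCells are built internally
cell_a = GroupOfESNCell(input_size=1, hidden_size=250, groups=4)

# groups as a list: prebuilt cells are stored as-is (Example #5 passes DeepESNCells)
prebuilt = [DeepESNCell(1, 250, False, WeightInitializer(), layers,
                        A.self_normalizing_default()) for layers in (2, 2)]
cell_b = GroupOfESNCell(input_size=1, hidden_size=250, groups=prebuilt)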
Example #5
 def __init__(self,
              input_size: int = 1,
              hidden_size: int = 250,
              output_dim: int = 1,
              bias: bool = False,
              initializer: WeightInitializer = WeightInitializer(),
              groups=2,
              num_layers=(2, 2),
              activation: Activation = A.self_normalizing_default(),
              washout: int = 30,
              regularization: float = 1.,
              network_size=None):
     hidden_size = (hidden_size if network_size is None else
                    network_size // sum(num_layers))
     super().__init__(reservoir=MultiTimeSeriesHandler(
         GroupOfESNCell(input_size, hidden_size, [
             DeepESNCell(input_size, hidden_size, bias, initializer, layers,
                         activation) for layers in num_layers
         ], activation, bias, initializer)),
                      readout=SVDReadout(hidden_size * groups,
                                         output_dim,
                                         regularization=regularization),
                      washout=washout)
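
The network_size argument fixes the total unit budget instead of the per-reservoir width. A hedged arithmetic sketch, assuming this __init__ belongs to the GroupedDeepESN constructed in Example #6:

# network_size // sum(num_layers) = 400 // (2 + 2) = 100 units per reservoir
esn = GroupedDeepESN(groups=2, num_layers=(2, 2), network_size=400)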
Example #6
import torch
from matplotlib import pyplot as plt

import auto_esn.utils.dataset_loader as dl
from auto_esn.datasets.df import MackeyGlass
from auto_esn.esn.esn import GroupedDeepESN
from auto_esn.esn.reservoir.activation import self_normalizing_default
from auto_esn.esn.reservoir.util import NRMSELoss

mg17clean = dl.loader_explicit(MackeyGlass, test_size=400)
nrmse = NRMSELoss()

X, X_test, y, y_test = mg17clean()

# now choose activation and configure it
activation = self_normalizing_default(spectral_radius=100)

# initialize the esn
esn_original = GroupedDeepESN(
    groups=2,  # choose number of groups
    num_layers=(3, 3),  # choose number of layers for each group
    hidden_size=80,  # choose hidden size for all reservoirs
    activation=activation  # assign activation
)

# fit
esn_original.fit(X, y)

# save model
with open('esn_model.pkl', 'wb') as fn:
    torch.save(esn_original, fn)
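
A follow-up sketch for the file saved above: torch.load restores the pickled model, and the call/score pattern mirrors Examples #7 and #8:

# reload the model and evaluate it on the held-out split
with open('esn_model.pkl', 'rb') as fn:
    esn_restored = torch.load(fn)

output = esn_restored(X_test)
print(f"NRMSE: {nrmse(output.unsqueeze(-1), y_test).item()}")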
Example #7
import torch

import auto_esn.utils.dataset_loader as dl
from auto_esn.datasets.df import MackeyGlass
from auto_esn.esn.esn import GroupedDeepESN
from auto_esn.esn.reservoir.activation import self_normalizing_default
from auto_esn.esn.reservoir.util import NRMSELoss

mg17clean = dl.loader_explicit(MackeyGlass, test_size=400)
nrmse = NRMSELoss()

X, X_test, y, y_test = mg17clean()

print(f"Size of X: {X.shape}, X_test: {X_test.shape}")
# double the dimensionality of test and train input
X = torch.cat((X, X), dim=1)
X_test = torch.cat((X_test, X_test), dim=1)
print(f"Size of doubled X: {X.shape}, X_test: {X_test.shape}")

# now choose activation and configure it
activation = self_normalizing_default(leaky_rate=1.0, spectral_radius=500)

# initialize the esn
esn = GroupedDeepESN(
    # You need to specify the dimensionality of the input
    # it will later be checked whether the data provided matches the declared shape
    input_size=2,
    groups=3,
    num_layers=(2, 2, 3),
    hidden_size=80,
    activation=activation)

# fit
esn.fit(X, y)
# predict
output = esn(X_test)
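
To close the loop, the nrmse helper loaded above can score the forecast; the unsqueeze(-1) mirrors the metric call in Example #8:

print(f"NRMSE: {nrmse(output.unsqueeze(-1), y_test).item()}")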
Example #8
    def fit(self, X: Tensor, y: Tensor, X_val: Tensor, y_val: Tensor):
        start = time.time()

        sample_no = 0
        results: List[Tuple[float, Tensor]] = []
        models = []
        while (sample_no < self.max_samples
               and time.time() - start < self.max_time_sec):
            size, layers, leaky = (self.size_gen(), self.layer_gen(),
                                   self.leaky_gen())
            logging.info(
                f"sample no.{sample_no} with layers ={layers}, size={size}, leaky={leaky}"
            )
            esn = DeepESN(
                num_layers=layers,
                hidden_size=size,
                activation=activation.self_normalizing_default(
                    spectral_radius=100.0, leaky_rate=leaky),
                # readout = AutoNNReadout(input_dim=layers*size, lr=1e-4, epochs=1700)
            )
            esn.fit(X, y)
            output = esn(X_val)

            act_metric = self.metric(output.unsqueeze(-1), y_val).item()
            logging.info(
                f"sample no.{sample_no} trained with {self.metric.__name__} = {act_metric} "
            )
            results.append((act_metric, output.unsqueeze(-1)))
            models.append(esn)

            sample_no += 1

        # sort results and models together so later indices stay aligned
        order = sorted(range(len(results)),
                       key=lambda i: results[i][0])  # todo handle norm data
        results = [results[i] for i in order]
        models = [models[i] for i in order]
        if self.nbest > 0:
            used = set(range(self.nbest))
            self.models = [models[i] for i in used]
            curr_out = sum([results[i][1] for i in used]) / len(used)
            logging.info(
                f"grouping improved {results[0][0]} to {self.metric(curr_out,y_val)} by merging models: {used}"
            )
            return
        if self.fast:
            best_metric = results[0][0]
            used = {0}
        else:

            grid = [(i, j,
                     self.metric((results[i][1] + results[j][1]) / 2, y_val))
                    for i in range(len(results) - 1)
                    for j in range(i, len(results))]
            min_grid = min(grid, key=lambda x: x[2])
            best_metric = min_grid[2]
            used = {min_grid[0], min_grid[1]}
        clean_pass = False
        while not clean_pass:
            curr_out = sum([results[i][1] for i in used])
            new_groups = [(i,
                           self.metric(
                               (results[i][1] + curr_out) / (len(used) + 1),
                               y_val).item())
                          for i in set(range(len(results))).difference(used)]
            # the metric is a loss, so the best candidate minimizes it
            best_idx, best_curr = min(new_groups, key=lambda x: x[1])
            if best_curr < best_metric:
                used.add(best_idx)
                best_metric = best_curr
            else:
                clean_pass = True

        self.models = [models[i] for i in used]
        logging.info(
            f"grouping improved {results[0][0]} to {best_metric} by merging models: {used}"
        )
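
fit() ends by storing the selected models, but the matching inference step is not shown here. A hypothetical companion method (an assumption, not part of the source) would average the selected models' outputs, consistent with the sum-and-divide merging used during fit():

    def predict(self, X: Tensor) -> Tensor:
        # hypothetical: average the greedily selected models' outputs
        return sum(m(X) for m in self.models) / len(self.models)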