Example #1
import numpy as np
from sklearn.model_selection import GridSearchCV, train_test_split
# PyRCN imports; the exact module paths are assumed here and may differ
# between PyRCN versions.
from pyrcn.base import InputToNode, NodeToNode
from pyrcn.linear_model import IncrementalRegression
from pyrcn.echo_state_network import ESNRegressor
from pyrcn.datasets import mackey_glass


def test_esn_regressor_jobs() -> None:
    print('\ntest_esn_regressor_jobs():')
    X, y = mackey_glass(n_timesteps=8000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
    param_grid = {
        "input_to_node": [
            InputToNode(bias_scaling=.1,
                        hidden_layer_size=10,
                        input_activation='identity',
                        random_state=42),
            InputToNode(bias_scaling=.1,
                        hidden_layer_size=50,
                        input_activation='identity',
                        random_state=42)
        ],
        "node_to_node": [
            NodeToNode(spectral_radius=0.,
                       hidden_layer_size=10,
                       random_state=42),
            NodeToNode(spectral_radius=1,
                       hidden_layer_size=50,
                       random_state=42)
        ],
        "regressor":
        [IncrementalRegression(alpha=.0001),
         IncrementalRegression(alpha=.01)],
        'random_state': [42]
    }
    esn = GridSearchCV(estimator=ESNRegressor(), param_grid=param_grid)
    esn.fit(X_train.reshape(-1, 1), y_train, n_jobs=2)
    y_esn = esn.predict(X_test.reshape(-1, 1))
    print("tests - esn:\n sin | cos \n {0}".format(y_test - y_esn))
    print("best_params_: {0}".format(esn.best_params_))
    print("best_score: {0}".format(esn.best_score_))
    np.testing.assert_allclose(1, esn.best_score_, atol=1e-1)
Example #2
def __init__(self,
             *,
             input_to_node=None,
             regressor=None,
             chunk_size=None,
             **kwargs):
    if input_to_node is None:
        i2n_params = InputToNode()._get_param_names()
        self.input_to_node = InputToNode(**{
            key: kwargs[key]
            for key in kwargs.keys() if key in i2n_params
        })
    else:
        i2n_params = input_to_node._get_param_names()
        self.input_to_node = input_to_node.set_params(**{
            key: kwargs[key]
            for key in kwargs.keys() if key in i2n_params
        })
    if regressor is None:
        reg_params = IncrementalRegression()._get_param_names()
        self.regressor = IncrementalRegression(**{
            key: kwargs[key]
            for key in kwargs.keys() if key in reg_params
        })
    else:
        reg_params = regressor._get_param_names()
        self.regressor = regressor.set_params(**{
            key: kwargs[key]
            for key in kwargs.keys() if key in reg_params
        })
    self._chunk_size = chunk_size
Example #3
def test_linear():
    print('\ntest_linear():')
    rs = np.random.RandomState(42)
    index = range(1000)
    X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1),
                   np.linspace(-1., 1., 1000).reshape(-1, 1),
                   rs.random(1000).reshape(-1, 1)))
    transformation = rs.random(size=(3, 2))
    y = np.matmul(X, transformation)

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=10,
                                                        random_state=42)
    reg = IncrementalRegression()
    assert is_regressor(reg)

    for prt in np.array_split(index, 3):
        reg.partial_fit(X[prt, :], y[prt, :])

    y_reg = reg.predict(X_test)
    print("tests: {0}\nregr: {1}".format(y_test, y_reg))
    np.testing.assert_allclose(y_reg, y_test, rtol=.01, atol=.15)
Example #4
def test_elm_regressor_jobs():
    print('\ntest_elm_regressor_jobs():')
    X = np.linspace(0, 10, 2000)
    y = np.hstack((np.sin(X).reshape(-1, 1), np.cos(X).reshape(-1, 1)))
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=10,
                                                        random_state=42)
    param_grid = {
        'input_to_node': [[('default',
                            InputToNode(bias_scaling=10.,
                                        hidden_layer_size=20,
                                        random_state=42))],
                          [('default',
                            InputToNode(bias_scaling=10.,
                                        hidden_layer_size=50,
                                        random_state=42))]],
        'regressor':
        [IncrementalRegression(alpha=.0001),
         IncrementalRegression(alpha=.01)],
        'random_state': [42]
    }
    elm = GridSearchCV(ELMRegressor(), param_grid)
    elm.fit(X_train.reshape(-1, 1), y_train, n_jobs=2)
    y_elm = elm.predict(X_test.reshape(-1, 1))
    print("tests - elm:\n sin | cos \n {0}".format(y_test - y_elm))
    print("best_params_: ".format(elm.best_params_))
    print("best_score: ".format(elm.best_score_))
    np.testing.assert_allclose(y_test, y_elm, atol=1e-1)
Example #5
def test_normalize() -> None:
    print('\ntest_normalize():')
    rs = np.random.RandomState(42)
    X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1),
                   np.linspace(-1., 1., 1000).reshape(-1, 1),
                   rs.random(1000).reshape(-1, 1)))
    transformation = rs.random(size=(3, 2))
    y = np.matmul(X, transformation)
    reg = IncrementalRegression(normalize=True)
    reg.fit(X, y)
Example #6
def test_iris_ensemble_iterative_regression():
    print('\ntest_iris_ensemble_iterative_regression():')
    X_train, X_test, y_train, y_test = train_test_split(X_iris,
                                                        y_iris,
                                                        test_size=5,
                                                        random_state=42)
    cls = ESNClassifier(input_to_node=[('tanh',
                                        InputToNode(hidden_layer_size=10,
                                                    random_state=42,
                                                    activation='identity')),
                                       ('bounded_relu',
                                        InputToNode(hidden_layer_size=10,
                                                    random_state=42,
                                                    activation='bounded_relu'))
                                       ],
                        node_to_node=[('default',
                                       NodeToNode(hidden_layer_size=20,
                                                  spectral_radius=0.0))],
                        regressor=IncrementalRegression(alpha=.01),
                        random_state=42)

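    # partial_fit requires the complete set of classes up front, because an
    # individual chunk does not necessarily contain all of them.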
    for samples in np.split(np.arange(0, X_train.shape[0]), 5):
        cls.partial_fit(X_train[samples, :],
                        y_train[samples],
                        classes=np.arange(3, dtype=int))
    y_predicted = cls.predict(X_test)

    for record in range(len(y_test)):
        print('predicted: {0} \ttrue: {1}'.format(y_predicted[record],
                                                  y_test[record]))

    print('score: {0}'.format(cls.score(X_test, y_test)))
    print('proba: {0}'.format(cls.predict_proba(X_test)))
    print('log_proba: {0}'.format(cls.predict_log_proba(X_test)))
    assert cls.score(X_test, y_test) >= 4. / 5.
Example #7
def test_compare_ridge() -> None:
    X_train, X_test, y_train, y_test = train_test_split(
        X_diabetes, y_diabetes, test_size=10, random_state=42)

    i_reg = IncrementalRegression(alpha=.01).fit(X_train, y_train)
    ridge = Ridge(alpha=.01, solver='svd').fit(X_train, y_train)

    print("incremental: {0} ridge: {1}".format(i_reg.coef_, ridge.coef_))
    np.testing.assert_allclose(i_reg.coef_, ridge.coef_, rtol=.0001)
Example #8
def __init__(self,
             input_to_node=InputToNode(),
             regressor=IncrementalRegression(alpha=.0001),
             chunk_size=None,
             random_state=None):
    self.input_to_node = input_to_node
    self.random_state = random_state
    self._chunk_size = chunk_size
    self._regressor = regressor
Example #9
def __init__(self,
             input_to_node=InputToNode(),
             regressor=IncrementalRegression(alpha=.0001),
             chunk_size=None,
             random_state=None):
    super().__init__(input_to_node=input_to_node,
                     regressor=regressor,
                     chunk_size=chunk_size,
                     random_state=random_state)
    self._encoder = None
Example #10
def __init__(self,
             input_to_node=InputToNode(),
             node_to_node=FeedbackNodeToNode(),
             regressor=IncrementalRegression(alpha=.0001),
             chunk_size=None,
             random_state=None,
             n_jobs=None):
    super().__init__(input_to_node=input_to_node,
                     node_to_node=node_to_node,
                     regressor=regressor,
                     chunk_size=chunk_size,
                     random_state=random_state)
    self.n_jobs = n_jobs
Example #11
def __init__(self,
             *,
             input_to_node=InputToNode(),
             node_to_node=NodeToNode(),
             regressor=IncrementalRegression(alpha=.0001),
             chunk_size=None,
             random_state=None,
             n_jobs=None,
             output_strategy="last_state",
             **kwargs):
    super().__init__(input_to_node=input_to_node,
                     node_to_node=node_to_node,
                     regressor=regressor,
                     chunk_size=chunk_size,
                     random_state=random_state,
                     **kwargs)
    self.n_jobs = n_jobs
    self.output_strategy = output_strategy
Example #12
print(len(all_wavs_m))
all_wavs_n = glob.glob(r"C:\Temp\SpLxDataLondonStudents2008\N\*.wav")
print(len(all_wavs_n))

base_input_to_node = InputToNode(hidden_layer_size=500,
                                 input_activation='identity',
                                 k_in=5,
                                 input_scaling=14.6,
                                 bias_scaling=0.0,
                                 random_state=1)
base_node_to_node = NodeToNode(hidden_layer_size=500,
                               spectral_radius=0.8,
                               leakage=0.5,
                               k_rec=16,
                               bidirectional=True,
                               random_state=1)
base_reg = IncrementalRegression(alpha=1.7e-10)

base_esn = ESNRegressor(input_to_node=base_input_to_node,
                        node_to_node=base_node_to_node,
                        regressor=base_reg)

esn = base_esn
t1 = time.time()
# Train one ESN per frame length (sequentially here, since n_jobs=1).
Parallel(n_jobs=1, verbose=50)(
    delayed(train_esn)(base_input_to_node, base_node_to_node, base_reg,
                       frame_length, all_wavs_m)
    for frame_length in [7, 9, 11, 21, 31, 41, 81])
print("Finished in {0} seconds!".format(time.time() - t1))

exit(0)
Example #13
# In[7]:

base_input_to_nodes = InputToNode(hidden_layer_size=100,
                                  activation='identity',
                                  k_in=1,
                                  input_scaling=0.6,
                                  bias_scaling=0.0)
base_nodes_to_nodes = NodeToNode(hidden_layer_size=100,
                                 spectral_radius=0.9,
                                 leakage=1.0,
                                 bias_scaling=0.0,
                                 k_rec=10)

esn = ESNRegressor(input_to_node=base_input_to_nodes,
                   node_to_node=base_nodes_to_nodes,
                   regressor=IncrementalRegression(alpha=1e-8),
                   random_state=10)

# Training and Prediction.

# In[8]:

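# One-step-ahead targets: y is X shifted by one sample; the test targets are
# additionally shifted by future_len samples.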
X_train = scaler.transform(X[0:train_len])
y_train = scaler.transform(X[1:train_len + 1])
X_test = scaler.transform(X[train_len + 1:-1])
y_test = scaler.transform(X[train_len + 1 + future_len:])

fig = plt.figure()
plt.plot(scaler.transform(X.reshape(-1, 1)))
plt.xlabel("n")
plt.xlim([0, len(X)])
Example #14
param_grid = {
    'input_to_node__bias_scaling': [0.0],
    'input_to_node__activation': ['identity'],
    'input_to_node__random_state': [42],
    'node_to_node__hidden_layer_size': [50],
    'node_to_node__leakage': [1.0],
    'node_to_node__spectral_radius': np.linspace(start=0.0, stop=1, num=11),
    'node_to_node__bias_scaling': [0.0],
    'node_to_node__activation': ['tanh'],
    'node_to_node__random_state': [42],
    'regressor__alpha': [1e-3],
    'random_state': [42]
}

base_esn = ESNClassifier(input_to_node=InputToNode(),
                         node_to_node=NodeToNode(),
                         regressor=IncrementalRegression())

# ## Optimize input_scaling and spectral_radius
#
# We use the ParameterGrid from scikit-learn, which converts the grid parameters defined before into a list of dictionaries, one for each parameter combination.
#
# We loop over each entry of the parameter grid, set the parameters in reg and fit our model on the training data. Afterwards, we report the error rates on the training and the test set.
#
#     The lowest training error rate: 0.536330735; parameter combination: {'input_scaling': 0.1, 'spectral_radius': 1.0}
#     The lowest test error rate: 0.588987764; parameter combination: {'input_scaling': 0.1, 'spectral_radius': 1.0}
#
# We use the best parameter combination from the training set, because we do not want to overfit on the test set.
#
# As the Python call shows, we have modified the training procedure: we use "partial_fit" in order to present all sequences to the ESN independently of each other. The function "partial_fit" is part of the scikit-learn API. We have added one optional argument, "update_output_weights". By default it is True, so the output weights are recomputed after each sequence has been fed through the ESN.
#
# However, as this is computationally expensive, we can deactivate the output-weight computation after each sequence by setting "update_output_weights" to False. Then we simply collect the sufficient statistics for the later linear regression. To finish the training process, we call finalize() after passing all sequences through the ESN; a minimal sketch of this loop is shown below.
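
# A minimal sketch of the loop described above, under these assumptions:
# param_grid and base_esn are defined as in this example, X_train and y_train
# are lists of training sequences, and partial_fit/finalize behave as
# described in the text above.
from sklearn.base import clone
from sklearn.model_selection import ParameterGrid

for params in ParameterGrid(param_grid):
    esn = clone(base_esn).set_params(**params)
    for X_seq, y_seq in zip(X_train, y_train):
        # Only collect the sufficient statistics for the linear regression;
        # the expensive output-weight computation is postponed.
        esn.partial_fit(X_seq, y_seq, update_output_weights=False)
    # Compute the output weights once, after all sequences have been seen.
    esn.finalize()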
Example #15
train_len = 3000
future_len = 1
future_total = len(prices) - train_len


# Echo State Network preparation

# In[7]:


base_input_to_nodes = InputToNode(hidden_layer_size=100, activation='identity', k_in=1, input_scaling=0.6, bias_scaling=0.0)
base_nodes_to_nodes = NodeToNode(hidden_layer_size=100, spectral_radius=0.9, leakage=1.0, bias_scaling=0.0, k_rec=10)

esn = ESNRegressor(input_to_node=base_input_to_nodes,
                   node_to_node=base_nodes_to_nodes,
                   regressor=IncrementalRegression(alpha=1e-8), random_state=10)


# Training and Prediction.

# In[8]:


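# One-step-ahead training targets; the test targets are shifted by
# future_len samples instead.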
train_in = prices[0:train_len, :]
train_out = prices[0+1:train_len+1, :]
test_in = prices[0:train_len+future_total - future_len, :]
test_out = prices[future_len:train_len+future_total, :]

esn.fit(X=train_in, y=train_out.ravel())
train_pred = esn.predict(X=train_in)
test_pred = esn.predict(X=test_in)
Example #16
class ESNRegressor(BaseEstimator, MultiOutputMixin, RegressorMixin):
    """
    Echo State Network regressor.

    This model optimizes the mean squared error loss function using linear regression.

    Parameters
    ----------
    input_to_node : iterable, default=[('default', InputToNode())]
        List of (name, transform) tuples (implementing fit/transform) whose
        outputs are concatenated in a FeatureUnion, or a single transformer.
    node_to_node : iterable, default=[('default', NodeToNode())]
        List of (name, transform) tuples (implementing fit/transform) whose
        outputs are concatenated in a FeatureUnion, or a single transformer.
    regressor : object, default=IncrementalRegression(alpha=.0001)
        Regressor object such as derived from ``RegressorMixin``. This
        regressor will automatically be cloned each time prior to fitting.
        ``regressor`` cannot be None; omit the argument if in doubt.
    chunk_size : int, default=None
        If ``X.shape[0] > chunk_size``, the results are computed
        incrementally with ``partial_fit``.
    kwargs : dict, default=None
        Keyword arguments that are passed on to the sub-estimators.
    """
    @_deprecate_positional_args
    def __init__(self,
                 *,
                 input_to_node=None,
                 node_to_node=None,
                 regressor=None,
                 chunk_size=None,
                 **kwargs):
        if input_to_node is None:
            i2n_params = InputToNode()._get_param_names()
            self.input_to_node = InputToNode(**{
                key: kwargs[key]
                for key in kwargs.keys() if key in i2n_params
            })
        else:
            i2n_params = input_to_node._get_param_names()
            self.input_to_node = input_to_node.set_params(**{
                key: kwargs[key]
                for key in kwargs.keys() if key in i2n_params
            })
        if node_to_node is None:
            n2n_params = NodeToNode()._get_param_names()
            self.node_to_node = NodeToNode(**{
                key: kwargs[key]
                for key in kwargs.keys() if key in n2n_params
            })
        else:
            n2n_params = node_to_node._get_param_names()
            self.node_to_node = node_to_node.set_params(**{
                key: kwargs[key]
                for key in kwargs.keys() if key in n2n_params
            })
        if regressor is None:
            reg_params = IncrementalRegression()._get_param_names()
            self.regressor = IncrementalRegression(**{
                key: kwargs[key]
                for key in kwargs.keys() if key in reg_params
            })
        else:
            reg_params = regressor._get_param_names()
            self.regressor = regressor.set_params(**{
                key: kwargs[key]
                for key in kwargs.keys() if key in reg_params
            })
        self._chunk_size = chunk_size

    def get_params(self, deep=True):
        if deep:
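            # deep=True exposes the sub-estimators' parameters (plus the
            # regressor's alpha) as one flat dict, so they can be set
            # directly, e.g. from a parameter grid.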
            return {
                **self.input_to_node.get_params(),
                **self.node_to_node.get_params(),
                **{
                    "alpha": self.regressor.get_params()["alpha"]
                }
            }
        else:
            return {
                "input_to_node": self.input_to_node,
                "node_to_node": self.node_to_node,
                "regressor": self.regressor,
                "chunk_size": self.chunk_size
            }

    def set_params(self, **parameters):
        i2n_params = self.input_to_node._get_param_names()
        self.input_to_node = self.input_to_node.set_params(**{
            key: parameters[key]
            for key in parameters.keys() if key in i2n_params
        })
        n2n_params = self.node_to_node._get_param_names()
        self.node_to_node = self.node_to_node.set_params(**{
            key: parameters[key]
            for key in parameters.keys() if key in n2n_params
        })
        reg_params = self.regressor._get_param_names()
        self.regressor = self.regressor.set_params(**{
            key: parameters[key]
            for key in parameters.keys() if key in reg_params
        })
        for parameter, value in parameters.items():
            if parameter in self.get_params(deep=False):
                setattr(self, parameter, value)

        return self

    def partial_fit(self,
                    X,
                    y,
                    n_jobs=None,
                    transformer_weights=None,
                    postpone_inverse=False):
        """
        Fits the regressor partially.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
            The targets to predict.
        n_jobs : int, default=None
            The number of jobs to run in parallel. ``-1`` means using all processors.
            See :term:`Glossary <n_jobs>` for more details.
        transformer_weights : ignored

        Returns
        -------
        self : Returns a trained ESNRegressor model.
        """
        if not hasattr(self._regressor, 'partial_fit'):
            raise TypeError(
                'Regressor has no attribute partial_fit, got {0}'.format(
                    self._regressor))

        self._validate_hyperparameters()
        self._validate_data(X=X, y=y, multi_output=True)

        # input_to_node
        try:
            hidden_layer_state = self._input_to_node.transform(X)
        except NotFittedError as e:
            print('input_to_node has not been fitted yet: {0}'.format(e))
            hidden_layer_state = self._input_to_node.fit_transform(X)

        # node_to_node
        try:
            hidden_layer_state = self._node_to_node.transform(
                hidden_layer_state)
        except NotFittedError as e:
            print('node_to_node has not been fitted yet: {0}'.format(e))
            hidden_layer_state = self._node_to_node.fit_transform(
                hidden_layer_state)

        # regression
        if self._regressor:
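            # Discard the first wash_out time steps, i.e. the transient
            # reservoir states, before updating the linear regression.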
            self._regressor.partial_fit(
                hidden_layer_state[self.node_to_node.wash_out:, :],
                y[self.node_to_node.wash_out:, :],
                postpone_inverse=postpone_inverse)
        return self

    def fit(self, X, y, n_jobs=None, transformer_weights=None):
        """
        Fits the regressor.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
            The targets to predict.
        n_jobs : int, default=None
            The number of jobs to run in parallel. ``-1`` means using all processors.
            See :term:`Glossary <n_jobs>` for more details.
        transformer_weights : ignored

        Returns
        -------
        self : Returns a trained ESNRegressor model.
        """
        self._validate_hyperparameters()
        self._validate_data(X, y, multi_output=True)
        self._input_to_node.fit(X)
        self._node_to_node.fit(self._input_to_node.transform(X))
        # A fresh copy that keeps the hyperparameters, as promised in the
        # docstring (the original re-instantiated the class with defaults).
        self._regressor = clone(self._regressor)

        if self._chunk_size is None or self._chunk_size >= X.shape[0]:
            # input_to_node
            hidden_layer_state = self._input_to_node.transform(X)
            hidden_layer_state = self._node_to_node.transform(
                hidden_layer_state)

            # regression
            self._regressor.fit(
                hidden_layer_state[self.node_to_node.wash_out:, :],
                y[self.node_to_node.wash_out:, :])

        elif self._chunk_size < X.shape[0]:
            # setup chunk list
            chunks = list(range(0, X.shape[0], self._chunk_size))
            # postpone inverse calculation for chunks n-1
            for idx in chunks[:-1]:
                ESNRegressor.partial_fit(
                    self,
                    X=X[idx:idx + self._chunk_size, ...],
                    y=y[idx:idx + self._chunk_size, ...],
                    n_jobs=n_jobs,
                    transformer_weights=transformer_weights,
                    postpone_inverse=True)
            # last chunk, calculate inverse and bias
            ESNRegressor.partial_fit(self,
                                     X=X[chunks[-1]:, ...],
                                     y=y[chunks[-1]:, ...],
                                     n_jobs=n_jobs,
                                     transformer_weights=transformer_weights,
                                     postpone_inverse=False)
        else:
            raise ValueError('chunk_size invalid {0}'.format(self._chunk_size))
        return self

    def predict(self, X):
        """
        Predicts the targets using the trained ESN regressor.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples, n_features)

        Returns
        -------
        y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
            The predicted targets
        """
        if self._input_to_node is None or self._regressor is None:
            raise NotFittedError(self)

        hidden_layer_state = self._input_to_node.transform(X)
        hidden_layer_state = self._node_to_node.transform(hidden_layer_state)

        return self._regressor.predict(hidden_layer_state)

    def _validate_hyperparameters(self):
        """Validates the hyperparameters.
        Returns
        -------
        """
        if not (hasattr(self.input_to_node, "fit")
                and hasattr(self.input_to_node, "fit_transform")
                and hasattr(self.input_to_node, "transform")):
            raise TypeError("All input_to_node should be transformers "
                            "and implement fit and transform "
                            "'%s' (type %s) doesn't" %
                            (self.input_to_node, type(self.input_to_node)))

        if not (hasattr(self.node_to_node, "fit")
                and hasattr(self.node_to_node, "fit_transform")
                and hasattr(self.node_to_node, "transform")):
            raise TypeError("All node_to_node should be transformers "
                            "and implement fit and transform "
                            "'%s' (type %s) doesn't" %
                            (self.node_to_node, type(self.node_to_node)))

        if self._chunk_size is not None and (
                not isinstance(self._chunk_size, int) or self._chunk_size < 0):
            raise ValueError('Invalid value for chunk_size, got {0}'.format(
                self._chunk_size))

        if not is_regressor(self._regressor):
            raise TypeError("The last step should be a regressor "
                            "and implement fit and predict"
                            "'%s' (type %s) doesn't" %
                            (self._regressor, type(self._regressor)))

    def __sizeof__(self):
        """Returns the size of the object in bytes.
        Returns
        -------
        size : int
        Object memory in bytes.
        """
        return object.__sizeof__(self) + \
            sys.getsizeof(self._input_to_node) + \
            sys.getsizeof(self._node_to_node) + \
            sys.getsizeof(self._regressor)

    @property
    def regressor(self):
        """Returns the chunk_size, in which X will be chopped.
        Returns
        -------
        chunk_size : int or None
        """
        return self._regressor

    @regressor.setter
    def regressor(self, regressor):
        """Sets the regressor.
        Parameters
        ----------
        regressor : regressor or None
        Returns
        -------
        """
        self._regressor = regressor

    @property
    def input_to_node(self):
        """Returns the input_to_node list or the input_to_node Transformer.
        Returns
        -------
        input_to_node : Transformer or [Transformer]
        """
        return self._input_to_node

    @input_to_node.setter
    def input_to_node(self,
                      input_to_node,
                      n_jobs=None,
                      transformer_weights=None):
        """Sets the input_to_node list or the input_to_node Transformer.
        Parameters
        ----------
        input_to_node : Transformer or [Transformer]
        n_jobs : int, default=None
        Number of jobs to run in parallel.
        None means 1 unless in a joblib.parallel_backend context. -1 means using all processors.
        transformer_weights : dict, default=None
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
        Raises ValueError if key not present in transformer_list.
        Returns
        -------
        """
        if hasattr(input_to_node, '__iter__'):
            # Feature Union of list of input_to_node
            self._input_to_node = FeatureUnion(
                transformer_list=input_to_node,
                n_jobs=n_jobs,
                transformer_weights=transformer_weights)
        else:
            # single input_to_node
            self._input_to_node = input_to_node

    @property
    def node_to_node(self):
        """Returns the node_to_node list or the input_to_node Transformer.
        Returns
        -------
        node_to_node : Transformer or [Transformer]
        """
        return self._node_to_node

    @property
    def hidden_layer_state(self):
        """Returns the hidden_layer_state, e.g. the resevoir state over time.
        Returns
        -------
        hidden_layer_state : np.ndarray
        """
        return self._node_to_node._hidden_layer_state

    @node_to_node.setter
    def node_to_node(self,
                     node_to_node,
                     n_jobs=None,
                     transformer_weights=None):
        """Sets the input_to_node list or the input_to_node Transformer.
        Parameters
        ----------
        node_to_node : Transformer or [Transformer]
        n_jobs : int, default=None
        Number of jobs to run in parallel.
        None means 1 unless in a joblib.parallel_backend context. -1 means using all processors.
        transformer_weights : dict, default=None
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
        Raises ValueError if key not present in transformer_list.
        Returns
        -------
        """
        if hasattr(node_to_node, '__iter__'):
            # Feature Union of a list of node_to_node transformers
            self._node_to_node = FeatureUnion(
                transformer_list=node_to_node,
                n_jobs=n_jobs,
                transformer_weights=transformer_weights)
        else:
            # single node_to_node
            self._node_to_node = node_to_node

    @property
    def chunk_size(self):
        """Returns the chunk_size, in which X will be chopped.
        Returns
        -------
        chunk_size : int or None
        """
        return self._chunk_size

    @chunk_size.setter
    def chunk_size(self, chunk_size):
        """Sets the chunk_size, in which X will be chopped.
        Parameters
        ----------
        chunk_size : int or None
        Returns
        -------
        """
        self._chunk_size = chunk_size
Example #17
    ax.set_yticks([])
    ax.set_xlabel("Time")
    ax.set_xticks([])


dataset = np.loadtxt(fname=r"C:\Users\Steiner\Documents\Python\PyRCN\examples\dataset\sine_training.csv", delimiter=",", dtype=float)
X = dataset[:, 0].reshape(-1, 1)
y = dataset[:, 1]

dataset = np.loadtxt(fname=r"C:\Users\Steiner\Documents\Python\PyRCN\examples\dataset\sine_test.csv", delimiter=",", dtype=float)
X_test = dataset[:, 0].reshape(-1, 1)
y_test = dataset[:, 1]

input_to_node = InputToNode(hidden_layer_size=200, activation='identity', input_scaling=3., bias_scaling=0.01, random_state=1)
node_to_node = FeedbackNodeToNode(hidden_layer_size=200, sparsity=0.05, activation='tanh', spectral_radius=0.25, leakage=1.0, bias_scaling=0.0, teacher_scaling=1.12, teacher_shift=-0.7, bi_directional=False, output_activation="tanh", random_state=1)
reg = IncrementalRegression(alpha=1e-3)

esn = FeedbackESNRegressor(input_to_node=input_to_node, node_to_node=node_to_node, regressor=reg, random_state=1)

esn.partial_fit(X=X, y=y.reshape(-1, 1), postpone_inverse=False)

y_pred = esn.predict(X=X)

plt.figure(figsize=(10,1.5))
plt.plot(X, label='Input (Frequency)')
plt.plot(y, label='Target (Sine)')
plt.plot(y_pred, label='Predicted (Sine)')
plt.title('Training')
plt.xlim([0, len(y_pred)])
plt.legend()
Example #18
def elm_coates_stacked(directory):
    self_name = 'elm_coates_stacked'
    logger = new_logger(self_name, directory=directory)
    X, y = get_mnist(directory)
    logger.info('Loaded MNIST successfully with {0} records'.format(
        X.shape[0]))

    label_encoder = LabelEncoder().fit(y)
    y_encoded = label_encoder.transform(y)

    # scale X so X in [0, 1]
    X /= 255.

    # setup parameter grid
    param_grid = {
        'chunk_size': [10000],
        'input_scaling': np.logspace(start=-3, stop=1, base=10, num=3),
        'bias_scaling': [0.],  # np.logspace(start=-3, stop=1, base=10, num=6),
        'input_activation': ['relu'],
        'alpha': [1e-5],
        'random_state': [42]
    }

    # read input matrices from files
    list_filepaths = []
    predefined_input_weights = np.empty((784, 0))
    for filepath in glob.glob(os.path.join(directory, '*kmeans1*matrix.npy')):
        logger.info('matrix file found: {0}'.format(filepath))
        list_filepaths.append(filepath)
        predefined_input_weights = np.append(predefined_input_weights,
                                             np.load(filepath),
                                             axis=1)

    # setup estimator
    estimator = ELMClassifier(
        PredefinedWeightsInputToNode(
            predefined_input_weights=predefined_input_weights),
        IncrementalRegression())
    logger.info('Estimator params: {0}'.format(estimator.get_params().keys()))
    # return

    # setup grid search
    cv = GridSearchCV(estimator=estimator,
                      param_grid=param_grid,
                      scoring='accuracy',
                      n_jobs=1,
                      verbose=1,
                      cv=[(np.arange(0, train_size),
                           np.arange(train_size, 70000))])

    # run!
    cv.fit(X, y_encoded)
    cv_best_params = cv.best_params_
    del cv_best_params['input_to_nodes__predefined_input_weights']

    # report the best parameters
    logger.info('best parameters: {0} (score: {1})'.format(
        cv_best_params, cv.best_score_))

    # clean up the cv results
    cv_results = cv.cv_results_
    del cv_results['params']
    del cv_results['param_input_to_nodes__predefined_input_weights']

    # save results
    try:
        with open(os.path.join(directory, '{0}.csv'.format(self_name)),
                  'w') as f:
            f.write(','.join(cv_results.keys()) + '\n')
            for row in list(map(list, zip(*cv_results.values()))):
                f.write(','.join(map(str, row)) + '\n')
    except PermissionError as e:
        print('Missing privileges: {0}'.format(e))

    if not list_filepaths:
        logger.warning('no input weights matrices found')
        return