def test_iris_ensemble_iterative_regression():
    print('\ntest_iris_ensemble_iterative_regression():')
    X_train, X_test, y_train, y_test = train_test_split(
        X_iris, y_iris, test_size=5, random_state=42)
    # Ensemble of two input-to-node blocks feeding a single reservoir,
    # trained chunk-wise with IncrementalRegression.
    cls = ESNClassifier(
        input_to_node=[
            ('tanh', InputToNode(hidden_layer_size=10, random_state=42,
                                 activation='identity')),
            ('bounded_relu', InputToNode(hidden_layer_size=10, random_state=42,
                                         activation='bounded_relu'))],
        node_to_node=[('default', NodeToNode(hidden_layer_size=20,
                                             spectral_radius=0.0))],
        regressor=IncrementalRegression(alpha=.01),
        random_state=42)
    # Fit the classifier incrementally on five consecutive chunks of the
    # training data.
    for samples in np.split(np.arange(0, X_train.shape[0]), 5):
        cls.partial_fit(X_train[samples, :], y_train[samples],
                        classes=np.arange(3, dtype=int))
    y_predicted = cls.predict(X_test)
    for record in range(len(y_test)):
        print('predicted: {0} \ttrue: {1}'.format(y_predicted[record],
                                                  y_test[record]))
    print('score: {0}'.format(cls.score(X_test, y_test)))
    print('proba: {0}'.format(cls.predict_proba(X_test)))
    print('log_proba: {0}'.format(cls.predict_log_proba(X_test)))
    assert cls.score(X_test, y_test) >= 4. / 5.
def test_esn_classifier_partial_fit() -> None:
    X, y = load_digits(return_X_y=True, as_sequence=True)
    esn = ESNClassifier(hidden_layer_size=50, verbose=True)
    # The default IncrementalRegression regressor supports postponing the
    # matrix inversion until the final partial_fit call.
    for k in range(10):
        esn.partial_fit(X[k], np.repeat(y[k], 8), classes=np.arange(10),
                        postpone_inverse=True)
    print(esn.__sizeof__())
    print(esn.hidden_layer_state)
    # A regressor without partial_fit support, such as Ridge, must raise.
    esn = ESNClassifier(hidden_layer_size=50, regressor=Ridge())
    with pytest.raises(BaseException):
        for k in range(10):
            esn.partial_fit(X[k], np.repeat(y[k], 8), classes=np.arange(10),
                            postpone_inverse=True)
# In[ ]:


for params in ParameterGrid(grid):
    print(params)
    input_to_node = clone(base_input_to_node)
    node_to_node = clone(base_node_to_node)
    node_to_node.set_params(**params)
    esn = ESNClassifier(input_to_node=input_to_node,
                        node_to_node=node_to_node,
                        regressor=FastIncrementalRegression(alpha=5e-3),
                        random_state=10)
    # Incrementally fit the ESN on each training sequence.
    for X, y in zip(X_train, y_train):
        y = np.repeat(y, repeats=8, axis=0)
        esn.partial_fit(X=X, y=y.reshape(-1, 1), classes=range(10))
    # Zero-one loss on the training sequences.
    err_train = []
    for X, y in zip(X_train, y_train):
        y = np.repeat(np.atleast_2d(y), repeats=8, axis=0)
        y_pred = esn.predict(X=X)
        err_train.append(zero_one_loss(y, y_pred))
    # Zero-one loss on the test sequences.
    err_test = []
    for X, y in zip(X_test, y_test):
        y = np.repeat(np.atleast_2d(y), repeats=8, axis=0)
        y_pred = esn.predict(X=X)
        err_test.append(zero_one_loss(y, y_pred))
    print('{0}\t{1}'.format(np.mean(err_train), np.mean(err_test)))


# ## Update parameters of the basic ESN
#
# After optimizing bias and leakage, we update the basic ESN with the
# identified values.
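# Below is a minimal sketch of this update step, assuming the best-performing
# grid entry has been stored in a dict called `best_params` (a hypothetical
# name, holding the same keys as `grid`). Since the building blocks follow the
# scikit-learn estimator API, `set_params` applies the identified values in
# place before the blocks are cloned into the next experiments.

# In[ ]:


base_node_to_node.set_params(**best_params)
print(base_node_to_node)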