def test_handle_errors(self):
    """DAN constructor and prediction must reject invalid ensembles."""
    data, target = datasets.make_classification(300, n_features=4, n_classes=2)
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        data, target, test_size=0.3)

    with self.assertRaises(ValueError):
        # First network has two output layers and the second
        # just one.
        algorithms.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 10, 2), step=0.1),
            algorithms.GradientDescent((4, 10, 1), step=0.1)
        ])

    with self.assertRaises(ValueError):
        # Ensemble with fewer than two networks (only one is given)
        algorithms.DynamicallyAveragedNetwork(
            [algorithms.GradientDescent((4, 10, 1), step=0.1)])

    with self.assertRaises(ValueError):
        # Relu output layer is not bounded to the [0, 1] range,
        # so predicted probabilities can be greater than 1
        dan = algorithms.DynamicallyAveragedNetwork([
            algorithms.GradientDescent([
                Input(4),
                Sigmoid(10),
                Relu(1, weight=init.Uniform(), bias=init.Uniform()),
            ], step=0.01),
            algorithms.RPROP((4, 10, 1), step=0.01),
        ])
        dan.train(x_train, y_train, epochs=10)
        dan.predict(x_test)
def test_irpropplus(self):
    """iRPROP+ should reach a lower training error than plain RPROP.

    Both algorithms get identical starting weights (deep copies of one
    network) and identical hyperparameters, so the comparison isolates
    the update rule itself.
    """
    shared_options = {
        'minstep': 0.001,
        'maxstep': 1,
        'increase_factor': 1.1,
        'decrease_factor': 0.1,
        'step': 1,
        'verbose': False,
    }

    # Pre-sample fixed weights so both algorithms start from
    # exactly the same point in parameter space.
    sampler = init.Uniform()
    hidden_params = dict(
        weight=sampler.sample((3, 10), return_array=True),
        bias=sampler.sample((10,), return_array=True),
    )
    output_params = dict(
        weight=sampler.sample((10, 2), return_array=True),
        bias=sampler.sample((2,), return_array=True),
    )
    network = layers.join(
        Input(3),
        Sigmoid(10, **hidden_params),
        Sigmoid(2, **output_params),
    )

    irprop_net = algorithms.IRPROPPlus(copy.deepcopy(network), **shared_options)
    irprop_net.train(simple_x_train, simple_y_train, epochs=100)
    irprop_plus_error = irprop_net.errors.train[-1]
    self.assertGreater(1e-4, irprop_plus_error)

    rprop_net = algorithms.RPROP(copy.deepcopy(network), **shared_options)
    rprop_net.train(simple_x_train, simple_y_train, epochs=100)
    rprop_error = rprop_net.errors.train[-1]
    self.assertGreater(rprop_error, irprop_plus_error)
def __init__(self, connection, **options):
    """Validate a two-layer connection and initialize the network.

    Parameters
    ----------
    connection : list of int or LayerConnection
        Either a ``(input_size, output_size)`` pair of integers,
        which gets expanded into ``Input > Step`` layers, or an
        already constructed connection whose final layer is ``Step``.
    **options
        Forwarded to the parent network constructor.

    Raises
    ------
    ValueError
        If the connection does not have exactly two layers, or is
        not a valid connection structure.
    NetworkConnectionError
        If the final layer is not a ``layers.Step`` instance.
    """
    if len(connection) != 2:
        # Fixed grammar in the error message ("should contains").
        raise ValueError("This network should contain two layers.")

    if is_list_of_integers(connection):
        input_layer_size, output_layer_size = connection
        connection = Input(input_layer_size) > Step(output_layer_size)

    if not isinstance(connection, LayerConnection):
        raise ValueError("Invalid network connection structure.")

    if not isinstance(connection.output_layer, Step):
        raise NetworkConnectionError(
            "Final layer should contain step activation function "
            "(``layers.Step`` class instance).")

    super(BaseLinearNetwork, self).__init__(connection, **options)
def __init__(self, connection, **options):
    """Validate a two-layer connection and initialize the network.

    Parameters
    ----------
    connection : list of int or LayerConnection
        Either a ``(input_size, output_size)`` pair of integers,
        which gets expanded into ``Input > Step`` layers, or an
        already constructed connection whose final layer is ``Step``.
    **options
        Forwarded to the parent network constructor.

    Raises
    ------
    ValueError
        If the connection does not have exactly two layers, or is
        not a valid connection type.
    InvalidConnection
        If the final layer is not a ``layers.Step`` instance.
    """
    if len(connection) != 2:
        # Fixed grammar in the error message ("should contains").
        raise ValueError("This network should contain two layers.")

    if all(isinstance(element, int) for element in connection):
        input_layer_size, output_layer_size = connection
        connection = Input(input_layer_size) > Step(output_layer_size)

    if not isinstance(connection, LayerConnection):
        raise ValueError("Invalid connection type")

    # Only a single output layer is expected for this network type.
    output_layer = connection.output_layers[0]

    if not isinstance(output_layer, Step):
        raise InvalidConnection(
            "Final layer should contain step activation function "
            "(``layers.Step`` class instance).")

    super(BaseLinearNetwork, self).__init__(connection, **options)
def test_irpropplus(self):
    """iRPROP+ should reach a lower training error than plain RPROP.

    Both algorithms are built from deep copies of the same layer list
    with identical hyperparameters, isolating the update rule.
    """
    shared_options = {
        'minstep': 0.001,
        'maxstep': 1,
        'increase_factor': 1.1,
        'decrease_factor': 0.1,
        'step': 1,
        'verbose': False,
    }
    connection = [
        Input(3),
        Sigmoid(10, weight=init.Uniform(), bias=init.Uniform()),
        Sigmoid(2, weight=init.Uniform(), bias=init.Uniform()),
    ]

    irprop_net = algorithms.IRPROPPlus(copy.deepcopy(connection), **shared_options)
    irprop_net.train(simple_input_train, simple_target_train, epochs=100)
    irprop_plus_error = irprop_net.errors.last()
    self.assertGreater(1e-4, irprop_plus_error)

    rprop_net = algorithms.RPROP(copy.deepcopy(connection), **shared_options)
    rprop_net.train(simple_input_train, simple_target_train, epochs=100)
    rprop_error = rprop_net.errors.last()
    self.assertGreater(rprop_error, irprop_plus_error)
# Hold out the final 10% of the series for testing.
train_size = int(t.shape[0] * 0.9)
# Removed a dead bare `train_size` expression (notebook display artifact).

X_train = t[:train_size]
y_train = x[:train_size]
X_test = t[train_size:]
y_test = x[train_size:]

scaler_x = StandardScaler()
scaler_y = StandardScaler()

# NOTE(review): the scaled arrays below are never used by the code in
# view -- the network trains on the raw X_train/y_train. Confirm whether
# scaling was meant to be applied before training, or remove these.
tmp_train_scaled_x = scaler_x.fit_transform(X_train[:, np.newaxis])
tmp_test_scaled_x = scaler_x.transform(X_test[:, np.newaxis])
tmp_train_scaled_y = scaler_y.fit_transform(y_train[:, np.newaxis])

# Small 1-60-1 network trained with Levenberg-Marquardt.
lmnet = algorithms.LevenbergMarquardt(
    (Input(1), Tanh(60), Linear(1)),
    verbose=True,
)
lmnet.train(X_train, y_train, epochs=100)

# Training-set fit.
pred_x = lmnet.predict(X_train)
mse = sklearn.metrics.mean_squared_error(y_train, pred_x.flatten())
print(f'RMSE = {np.sqrt(mse)}')

plt.plot(X_train, y_train, label='train')
plt.plot(X_train, pred_x, label='predict')
plt.legend()

# Held-out test-set error.
pred_x = lmnet.predict(X_test)
mse = sklearn.metrics.mean_squared_error(y_test, pred_x.flatten())
print(f'RMSE = {np.sqrt(mse)}')
def setUp(self):
    """Build the 3-10-2 sigmoid network shared by the RPROP tests."""
    super(RPROPTestCase, self).setUp()
    input_layer = Input(3)
    hidden_layer = Sigmoid(10)
    output_layer = Sigmoid(2)
    self.network = input_layer > hidden_layer > output_layer
def setUp(self):
    """Build the 3-10-2 sigmoid connection shared by the RPROP tests."""
    super(RPROPTestCase, self).setUp()
    input_layer = Input(3)
    hidden_layer = Sigmoid(10)
    output_layer = Sigmoid(2)
    self.connection = input_layer > hidden_layer > output_layer