def test_log_scale(self):
    original_image_name = format_image_name("log_scale.png")
    original_image = os.path.join(IMGDIR, original_image_name)

    with image_comparison(original_image) as fig:
        ax = fig.add_subplot(1, 1, 1)
        network = reproducible_network_train(step=0.3)
        plots.error_plot(network, logx=True, ax=ax, show=False)
def test_error_plot_and_validation_error_warnings(self):
    with catch_stdout() as out:
        network = algorithms.GradientDescent((2, 3, 1), verbose=True)
        network.errors = ErrorHistoryList([1, 2])
        network.validation_errors = ErrorHistoryList([None])
        plots.error_plot(network, ax=None, show=False)

        terminal_output = out.getvalue()
        self.assertIn("error will be ignored", terminal_output)
def test_plot_with_validation_dataset(self):
    original_image_name = format_image_name("with_validation.png")
    original_image = os.path.join(IMGDIR, original_image_name)

    with image_comparison(original_image) as fig:
        ax = fig.add_subplot(1, 1, 1)
        x_train, x_test, y_train, y_test = simple_classification()
        gdnet = algorithms.GradientDescent((10, 12, 1), step=0.25)
        gdnet.train(x_train, y_train, x_test, y_test, epochs=100)
        plots.error_plot(gdnet, ax=ax, show=False)
def test_error_plot_show_image(self):
    def mock_plt_show():
        # The test is supposed not to fail; plt.show is replaced so
        # that no figure window opens during the test run.
        pass

    real_plt_show = plt.show
    plt.show = mock_plt_show

    network = reproducible_network_train(step=0.3)
    plots.error_plot(network, show=True)

    plt.show = real_plt_show
def test_error_plot_ax_none(self):
    ax = plt.gca()
    network = algorithms.GradientDescent((2, 3, 1))
    ax_returned = plots.error_plot(network, ax=None, show=False)
    self.assertIs(ax_returned, ax)
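# The test above relies on error_plot returning the matplotlib axes it
# drew on (the current axes when ax=None). A minimal sketch of using
# that return value outside the test suite; `network` stands for any
# trained network like the ones in the snippets above, and the title
# and file name are illustrative, not from the original code.
import matplotlib.pyplot as plt
from neupy import plots

ax = plots.error_plot(network, ax=None, show=False)
ax.set_title("Training error per epoch")
plt.savefig("error_plot.png")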
def run_neural_net():
    import_modules()

    dataset = datasets.load_boston()
    data, target = dataset.data, dataset.target

    data_scalar = preprocessing.MinMaxScaler()
    target_scalar = preprocessing.MinMaxScaler()

    data = data_scalar.fit_transform(data)
    target = target_scalar.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(
        data, target, train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection=[
            layers.Input(13),
            layers.Sigmoid(75),
            layers.Sigmoid(25),
            layers.Sigmoid(1),
        ],
        search_method='golden',
        show_epoch=1,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    cgnet.train(x_train, y_train, x_test, y_test, epochs=30)
    plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    # Undo the target scaling so RMSLE is computed in the original units.
    error = rmsle(target_scalar.inverse_transform(y_test),
                  target_scalar.inverse_transform(y_predict))
    return error
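# The function above scales the targets into [0, 1] for training and
# inverts the scaling before computing RMSLE, so the error is reported
# in the original units. A self-contained sketch of that round trip;
# the toy target values are illustrative.
import numpy as np
from sklearn import preprocessing

target = np.array([[10.0], [20.0], [30.0]])

scaler = preprocessing.MinMaxScaler()
scaled = scaler.fit_transform(target)        # mapped into [0, 1]
restored = scaler.inverse_transform(scaled)  # back to original units

assert np.allclose(restored, target)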
def go(self):
    raw = self.datafile.read().splitlines()
    data = self._prepare_data(raw[::2])
    target = self._prepare_target(raw[1::2])
    print(len(data))
    print(len(target))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(
        data, target, train_size=0.85)
    print(x_train[0])

    connections = [
        layers.Input(100),
        layers.Linear(200),
        layers.Sigmoid(150),
        layers.Sigmoid(5),
    ]

    cgnet = algorithms.ConjugateGradient(
        connection=connections,
        search_method='golden',
        show_epoch=25,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
    plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    error = rmsle(y_test, y_predict)
    print(error)

    with open('lib/net/base_searcher.pickle', 'wb') as f:
        pickle.dump(cgnet, f)
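# The method above persists the trained network with pickle; loading it
# back for later predictions is the mirror image. A sketch, assuming the
# same path and an x_test prepared by the same pipeline.
import pickle

with open('lib/net/base_searcher.pickle', 'rb') as f:
    cgnet = pickle.load(f)

y_predict = cgnet.predict(x_test).round(1)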
        layers.Relu(100),
        layers.Relu(1),
    ],
    search_method='golden',
    show_epoch=1,
    verbose=True,
    addons=[algorithms.LinearSearch],
    step=0.01,
    epoch_end_signal=on_epoch_end,
    error='rmse',
)

cgnet.train(training_X, training_Y, testing_X, testing_Y, epochs=1000)

from neupy import plots
plots.error_plot(cgnet)

y_predicted = cgnet.predict(testing_X)
print("RMSE = " + str(estimators.rmse(y_predicted, testing_Y.ravel())))
print("MAE = " + str(estimators.mae(y_predicted, testing_Y.ravel())))

actual_mae = y_data_scaler.inverse_transform(
    estimators.mae(y_predicted, testing_Y))
print("MAE (no. of shares) = " + str(actual_mae.squeeze()))

# SOM try out
# from neupy import algorithms
#
# num_epochs = 100
# num_clusters = 3
# num_features = training_X.shape[1]
#
# sofm = algorithms.SOFM(n_inputs=num_features, n_outputs=num_clusters,
#                        step=0.1, learning_radius=0, verbose=True,
import numpy as np
from neupy import algorithms, plots

x_train = np.array([[1, 2], [3, 4]])
y_train = np.array([[1], [0]])

lmnet = algorithms.LevenbergMarquardt((2, 3, 1))
lmnet.train(x_train, y_train)
plots.error_plot(lmnet)
    ],
    batch_size=128,
    step=0.1,

    # Use Mean Squared Error as the loss function
    error='mse',

    # Learning rate
    # step=1.0,

    # Display network data in the console
    verbose=True,

    # Shuffle the training data randomly before each epoch
    shuffle_data=True,
    show_epoch=1)

# Show the network architecture in the console
network.architecture()

network.train(x_train, y_train, x_test, y_test, epochs=70)
plots.error_plot(network)

# Build the test filters
MIN_POWER = np.min(x_train)
MAX_POWER = np.max(x_train)

FrequencyTestArray = np.empty([500, 500], dtype='float32')
FrequencyTestArray[:, :] = MAX_POWER
np.fill_diagonal(FrequencyTestArray[0:500, :], MIN_POWER)

DoubleTestArray = np.empty([250, 500], dtype='float32')
DoubleTestArray[:, :] = MAX_POWER
for i in range(0, 250):
    DoubleTestArray[i, i] = MIN_POWER
    DoubleTestArray[i, -(i + 1)] = MIN_POWER
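# FrequencyTestArray above fills a square matrix with the maximum power
# and drops the diagonal to the minimum power via np.fill_diagonal. A
# compact illustration on a small matrix; 9.0 and 0.0 stand in for
# MAX_POWER and MIN_POWER.
import numpy as np

a = np.full((4, 4), 9.0)
np.fill_diagonal(a, 0.0)
print(a)
# [[0. 9. 9. 9.]
#  [9. 0. 9. 9.]
#  [9. 9. 0. 9.]
#  [9. 9. 9. 0.]]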
def show_plow(self):
    plots.error_plot(self.network)
# Build the neural network with a 57-7-1 architecture
rpropnet = algorithms.RPROP(
    [
        layers.Input(57),
        layers.Sigmoid(7),
        layers.Sigmoid(1),
    ],
    error='mse',
    verbose=True,
    shuffle_data=True,
    maxstep=1,
    minstep=1e-7,
)

# Train the network
rpropnet.train(input_train=x_train, target_train=y_train, epochs=200)

# Plot the errors made during training
plots.error_plot(rpropnet)

# Round the predicted probabilities to 0/1 class labels
y_train_predicted = rpropnet.predict(x_train).round()
y_test_predicted = rpropnet.predict(x_test).round()

# Report the predictions (sklearn expects the ground truth first and
# the predictions second)
print(metrics.classification_report(y_train, y_train_predicted))
print(metrics.confusion_matrix(y_train, y_train_predicted))
print()
print(metrics.classification_report(y_test, y_test_predicted))
print(metrics.confusion_matrix(y_test, y_test_predicted))