コード例 #1
0
ファイル: test_levmarq.py プロジェクト: zeroyou/neupy
    def test_levenberg_marquardt_exceptions(self):
        """Invalid constructor options must raise ``ValueError``."""
        def build_network():
            return layers.Input(2) > layers.Sigmoid(3) > layers.Sigmoid(1)

        # A loss other than the supported one is rejected at construction.
        with self.assertRaises(ValueError):
            algorithms.LevenbergMarquardt(
                build_network(), loss='categorical_crossentropy')

        # LevenbergMarquardt does not expose a `step` parameter.
        with self.assertRaises(ValueError):
            algorithms.LevenbergMarquardt(build_network(), step=0.01)
コード例 #2
0
    def test_levenberg_marquardt(self):
        """Train LM on the diabetes dataset and pin the expected RMSLE.

        FIX: removed the bare no-op expression statement ``error`` that
        sat between the computation and the assertion (notebook residue).
        """
        dataset = datasets.load_diabetes()
        data, target = dataset.data, dataset.target

        data_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        # NOTE(review): `target` is 1-D; recent scikit-learn versions
        # require 2-D input here (`target.reshape(-1, 1)`) -- confirm the
        # pinned sklearn version before upgrading.
        x_train, x_test, y_train, y_test = train_test_split(
            data_scaler.fit_transform(data),
            target_scaler.fit_transform(target),
            train_size=0.85)

        # Network: 10-40-1 sigmoid stack, trained without bias terms.
        lmnet = algorithms.LevenbergMarquardt(connection=[
            layers.SigmoidLayer(10),
            layers.SigmoidLayer(40),
            layers.OutputLayer(1),
        ],
                                              mu_increase_factor=2,
                                              mu=0.1,
                                              show_epoch=10,
                                              use_bias=False)
        lmnet.train(x_train, y_train, epochs=100)
        y_predict = lmnet.predict(x_test)

        # Evaluate in the original target units.
        error = rmsle(target_scaler.inverse_transform(y_test),
                      target_scaler.inverse_transform(y_predict).round())

        self.assertAlmostEqual(0.4372, error, places=4)
コード例 #3
0
    def test_levenberg_marquardt(self):
        """LM should reach a small validation error on a toy regression."""
        data, target = datasets.make_regression(n_samples=50, n_features=2)

        input_scaler = preprocessing.MinMaxScaler()
        output_scaler = preprocessing.MinMaxScaler()

        scaled_inputs = input_scaler.fit_transform(data)
        scaled_targets = output_scaler.fit_transform(target.reshape(-1, 1))
        x_train, x_test, y_train, y_test = train_test_split(
            scaled_inputs, scaled_targets, test_size=0.15)

        optimizer = algorithms.LevenbergMarquardt(
            connection=[
                layers.Input(2),
                layers.Sigmoid(6),
                layers.Sigmoid(1),
            ],
            mu_update_factor=2,
            mu=0.1,
            verbose=False,
            show_epoch=1,
        )
        optimizer.train(x_train, y_train, epochs=4)
        error = optimizer.prediction_error(x_test, y_test)

        self.assertAlmostEqual(0.006, error, places=3)
コード例 #4
0
 def Levenberg_Marquardt(self):  # ready-made method from the toolkit
     """Train a Levenberg-Marquardt network on this object's dataset.

     Trains on ``self.inputs``/``self.targets``, appends the per-epoch
     errors to ``self.errors`` and evaluates the fitted network on
     ``self.xtest`` via ``self.estimating``.
     """
     # NOTE(review): presumably clears state left over from another
     # trainer -- confirm `beta` is never read after this method runs.
     del self.beta
     lm = algorithms.LevenbergMarquardt(connection=self.Initialize_Connection())
     # BUG FIX: the original called both `lm.fit(...)` and `lm.train(...)`
     # on the same data; in neupy `fit` is an alias of `train`, so the
     # network was trained twice (the first time with default epochs).
     # Train exactly once with the configured settings.
     lm.train(self.inputs, self.targets,
              epochs=self.epochs, epsilon=self.errorTolerance)
     for epoch_error in lm.errors:
         self.errors.append(epoch_error)
     predictTest = lm.predict(self.xtest)
     self.estimating(predictTest)
     return
コード例 #5
0
	def select_algorithm(self, algorithm, options=None):
		"""Create the training algorithm named *algorithm* over ``self.layers``.

		Falls back to a default ``LevenbergMarquardt`` instance when the
		name is not recognised. ``options`` is only consulted for
		``'LevenbergMarquardt'``, where ``options[0]`` is ``mu`` and
		``options[1]`` is ``mu_update_factor``.
		"""
		try:
			# Default optimizer -- kept when `algorithm` is unknown.
			self.network = algorithms.LevenbergMarquardt(self.layers)
			print("Wybrano optymalizator: " + str(algorithm))
		except RecursionError:
			print("Problem rekursji")
			return None

		# BUG FIX: the original unconditionally executed `print(opt[1])`,
		# which raised TypeError whenever `options` was left at its
		# default of None. `options` is now accessed only on the branch
		# that actually needs it.
		if algorithm == 'LevenbergMarquardt':
			self.network = algorithms.LevenbergMarquardt(
				connection=self.layers, mu=options[0],
				mu_update_factor=options[1])
			return

		# Every remaining optimizer takes only the layer graph, so a
		# dispatch table replaces the original 15-way `if` chain.
		constructors = {
			'GradientDescent': algorithms.GradientDescent,
			'Adam': algorithms.Adam,
			'QuasiNewton': algorithms.QuasiNewton,
			'Quickprop': algorithms.Quickprop,
			'MinibatchGradientDescent': algorithms.MinibatchGradientDescent,
			'ConjugateGradient': algorithms.ConjugateGradient,
			'Hessian': algorithms.Hessian,
			'HessianDiagonal': algorithms.HessianDiagonal,
			'Momentum': algorithms.Momentum,
			'RPROP': algorithms.RPROP,
			'IRPROPPlus': algorithms.IRPROPPlus,
			'Adadelta': algorithms.Adadelta,
			'Adagrad': algorithms.Adagrad,
			'RMSProp': algorithms.RMSProp,
			'Adamax': algorithms.Adamax,
		}
		if algorithm in constructors:
			self.network = constructors[algorithm](self.layers)
コード例 #6
0
ファイル: playground.py プロジェクト: altek42/inz
    def run(self):
        """Prepare the dataset, train an LM network and show the plots."""
        self.prepareData()
        self.showData()

        architecture = [
            layers.Input(1),
            layers.Sigmoid(8),
            layers.Sigmoid(2),
        ]
        lmnet = algorithms.LevenbergMarquardt(
            architecture, verbose=True, shuffle_data=True)

        # Plot the untrained network's output first, for comparison.
        self.showNetData(lmnet, 'dumb')

        inputs = np.array(self.input)
        targets = np.array(self.output)
        lmnet.fit(inputs, targets, epochs=100)

        self.showNetData(lmnet, 'trained')
        plt.show()
コード例 #7
0
def LevenbergMarquardt(col_predict, no_of_output_para, input_par, link, epoch,
                       units, tf):
    """Train an LM regressor on one column of an Excel sheet and persist it.

    Reads the spreadsheet at ``link``, predicts column ``col_predict``
    from the feature columns after the first ``no_of_output_para + 1``
    columns, and dumps the fitted optimizer next to the source file.

    Returns 0 when the target column has no header (pandas names empty
    header cells "Unnamed: N"); otherwise returns None after saving.
    """
    global graph
    with graph.as_default():

        dataset = pd.read_excel(link)

        # Skip sheets whose target column has no real header.
        cols_out = dataset.columns[col_predict:col_predict + 1]
        for col in cols_out:
            if "Unnamed" in col:
                return 0

        # Features start right after the output-parameter columns.
        X = dataset.iloc[:,
                         no_of_output_para + 1:dataset.values[0].size].values
        Y = dataset.iloc[:, col_predict].values

        # FIX: dropped the dead `X_train = np.array(X)` and
        # `y_train = np.array(Y)` assignments -- both were overwritten
        # immediately by train_test_split below.
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            Y,
                                                            test_size=0.2,
                                                            random_state=0)

        # Standardize features; fit only on the training split.
        sc = StandardScaler()
        X_train = sc.fit_transform(X_train)
        X_test = sc.transform(X_test)

        network = Input(input_par) >> Sigmoid(int(units / 10) + 1) >> Relu(1)
        optimizer = algorithms.LevenbergMarquardt([network],
                                                  verbose=False,
                                                  shuffle_data=False)

        optimizer.train(X_train, y_train, epochs=epoch)

        # Persist the fitted model; the filename encodes source + column.
        joblib.dump(optimizer, link + "-" + str(col_predict) + ".pkl")
        'Moving_av', 'Upper_volatility', 'Lower_volatility',
        'Short_resistance', 'Short_support', 'Long_resistance', 'Long_support',
        'Adj Close'
    ]].values

    scaler = StandardScaler()
    dataset = scaler.fit_transform(dataset)

    scaler = MinMaxScaler(feature_range=[0, 1])
    dataset = scaler.fit_transform(dataset)
    scaler_filename = 'scalers/' + stock + '_complete_' + interval + '.save'
    pickle.dump(scaler, open(scaler_filename, 'wb'))

    X, y = split_sequences(dataset[-300:-samples_test], n_steps_in,
                           n_steps_out)
    n_features = X.shape[2]
    y = y[:, :, -1:]

    x_train = X.reshape(len(X), n_steps_in * n_features)

    y_train = y.reshape(len(X), n_steps_out)

    network = Input(
        n_steps_in * n_features) >> Sigmoid(50) >> Sigmoid(n_steps_out)
    model = algorithms.LevenbergMarquardt(network, verbose=False, show_epoch=5)
    model.fit(x_train, y_train, epochs=epochs)

    filename = 'models/complete_' + stock + '-LevenbergMarquardt-' + interval + '.pickle'
    with open(filename, 'wb') as f:
        pickle.dump(model, f)
コード例 #9
0
# Hold out the last 10% of the series for testing.
# FIX: removed the bare no-op expression `train_size` (notebook residue).
train_size = int(t.shape[0] * 0.9)

X_train = t[:train_size]
y_train = x[:train_size]

X_test = t[train_size:]
y_test = x[train_size:]

# FIX: removed the dead StandardScaler block -- the scaled arrays
# (tmp_train_scaled_x, tmp_test_scaled_x, tmp_train_scaled_y) were never
# used; training below runs on the raw series. NOTE(review): if scaling
# was intended, feed those scaled arrays to train/predict instead.
lmnet = algorithms.LevenbergMarquardt((Input(1), Tanh(60), Linear(1)),
                                      verbose=True)

lmnet.train(X_train, y_train, epochs=100)

# Training-set fit quality.
pred_x = lmnet.predict(X_train)
mse = sklearn.metrics.mean_squared_error(y_train, pred_x.flatten())
print(f'RMSE = {np.sqrt(mse)}')

plt.plot(X_train, y_train, label='train')
plt.plot(X_train, pred_x, label='predict')
plt.legend()

# Held-out test error.
pred_x = lmnet.predict(X_test)
mse = sklearn.metrics.mean_squared_error(y_test, pred_x.flatten())
print(f'RMSE = {np.sqrt(mse)}')
コード例 #10
0
import numpy as np
from neupy import algorithms, plots

# Two toy samples with binary targets.
training_inputs = np.array([[1, 2], [3, 4]])
training_targets = np.array([[1], [0]])

# The (2, 3, 1) tuple is neupy shorthand for a 2-input, 3-hidden-unit,
# 1-output feed-forward network.
optimizer = algorithms.LevenbergMarquardt((2, 3, 1))
optimizer.train(training_inputs, training_targets)

# Visualize the per-epoch training error.
plots.error_plot(optimizer)
コード例 #11
0
 def test_levenberg_marquardt_assign_step_exception(self):
     """LevenbergMarquardt has no `step` parameter, so passing one raises."""
     with self.assertRaises(ValueError):
         algorithms.LevenbergMarquardt((2, 3, 1), step=0.01)
コード例 #12
0
 def test_levenberg_marquardt_invalid_error_exceptions(self):
     """An unsupported `error` function must be rejected with ValueError."""
     with self.assertRaises(ValueError):
         algorithms.LevenbergMarquardt((2, 3, 1),
                                       error='categorical_crossentropy')
コード例 #13
0
Y = np.array(Y)

# FIX: the original comments claimed "first 2500 out of 3000" /
# "rest 500" while the code slices at 3067 -- the numbers now match.
# The first 3067 emails serve as training data...
x_train = X[0:3067]
y_train = Y[0:3067]

# ...and the remainder as testing data.
x_test = X[3067:]
y_test = Y[3067:]

lmnet = algorithms.LevenbergMarquardt(
    [
        layers.Input(57),
        layers.Sigmoid(7),
        layers.Sigmoid(1),
    ],
    verbose=True,
    shuffle_data=True,
    mu=0.1,
    mu_update_factor=1.2,
    error="mse",
)
lmnet.train(input_train=x_train, target_train=y_train, epochs=200)

plots.error_plot(lmnet)

# Round sigmoid outputs to hard 0/1 class labels.
y_train_predicted = lmnet.predict(x_train).round()
y_test_predicted = lmnet.predict(x_test).round()

# BUG FIX: sklearn.metrics expects (y_true, y_pred); the original passed
# the predictions first, which swaps precision/recall per class and
# transposes the confusion matrix.
print(metrics.classification_report(y_train, y_train_predicted))
print(metrics.confusion_matrix(y_train, y_train_predicted))
print()
コード例 #14
0
ファイル: NeuPySky.py プロジェクト: TokeF/MasterThesis
# lbl_test[lbl_test==0] = -1
# Normalise features to zero mean / unit variance; fit on train only.
scaler = StandardScaler()
dbdt_train = scaler.fit_transform(dbdt_train)
dbdt_test = scaler.transform(dbdt_test)

# Stack neighbouring soundings into each input row (window parameter 3 --
# see build_array_skytem for the exact layout).
dbdt_train = build_array_skytem(3, dbdt_train)
dbdt_test = build_array_skytem(3, dbdt_test)

# 51-input tanh network with a single tanh output unit.
network = join(Input(51), Tanh(30), Tanh(1))

model2 = algorithms.LevenbergMarquardt(network, verbose=True)
model2.train(dbdt_train.T, lbl_train.T, epochs=10)

# Threshold the raw scores at zero to obtain binary predictions,
# then report plain accuracy.
lbl_pred_scor = model2.predict(dbdt_test.T)
lbl_pred = lbl_pred_scor > 0
print(lbl_pred)
print(sum(lbl_pred[0] == lbl_test) / len(lbl_test))

from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(lbl_test, lbl_pred))
print(classification_report(lbl_test, lbl_pred))

#
# plot_training(history)
# plot_misclassified(timestamp[r+1:-1], dbdt_testOG, lbl_test, lbl_pred)