Example #1
    def test_conjgrad_assign_step_exception(self):
        with self.assertRaises(ValueError):
            # Doesn't have a step parameter
            algorithms.ConjugateGradient((2, 3, 1), step=0.01)

        with self.assertRaises(ValueError):
            # Doesn't have a step parameter
            algorithms.ConjugateGradient((2, 3, 1),
                                         addons=[algorithms.LinearSearch])
Example #2
    def fit(self, data, target):
        data_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        data = data_scaler.fit_transform(data)
        target = target_scaler.fit_transform(target.reshape(-1, 1))

        environment.reproducible()
        x_train, x_test, y_train, y_test = train_test_split(data, target, train_size=0.85)
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        print(x_test)

        cgnet = algorithms.ConjugateGradient(
            connection=[
                layers.Input(2),
                layers.Sigmoid(10),
                layers.Sigmoid(1),
            ],
            search_method='golden',
            show_epoch=25,
            verbose=True,
            addons=[algorithms.LinearSearch],
        )
        cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
        self._model = cgnet
        return self
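A natural companion method would map predictions back to the original target scale. A minimal sketch, assuming fit() had stored its scalers on self (as written above, it discards them):

    def predict(self, data):
        # Hypothetical companion to fit(); self._data_scaler and
        # self._target_scaler are assumed to have been saved in fit().
        data = self._data_scaler.transform(data)
        predicted = self._model.predict(data)
        return self._target_scaler.inverse_transform(predicted)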
Example #3
    def test_conjgrad_assign_step_exception(self):
        with self.assertRaises(ValueError):
            # Doesn't have a step parameter
            algorithms.ConjugateGradient(
                layers.Input(2) > layers.Sigmoid(3) > layers.Sigmoid(1),
                step=0.01,
            )
Example #4
def ANNForecastBuild(layer, step):
    return algorithms.ConjugateGradient(
        connection=layer,
        search_method='golden',
        show_epoch=25,
        verbose=True,
        step=step,
        addons=[algorithms.LinearSearch],
    )
Example #5
    def Conjugate_Gradient(self):  # ready-made method from the toolkit
        cgd = algorithms.ConjugateGradient(connection=self.Initialize_Connection(), step=self.beta)
        cgd.fit(self.inputs, self.targets)
        cgd.train(self.xtrain, self.ttrain, epochs=self.epochs, epsilon=self.errorTolerance)
        for i in cgd.errors:
            self.errors.append(i)
        print(self.errors)
        predictTest = cgd.predict(self.xtest)
        self.estimating(predictTest)
        return
Example #6
    def test_conjugate_gradient(self):
        nw = algorithms.ConjugateGradient(self.connection,
                                          step=5,
                                          error=cross_entropy_error,
                                          use_raw_predict_at_error=False,
                                          shuffle_data=True,
                                          update_function='polak_ribiere')
        nw.train(simple_input_train, simple_target_train, epochs=300)
        result = nw.predict(simple_input_train)
        norm = np.linalg.norm(result - simple_target_train)
        self.assertGreater(1e-2, norm)
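Examples #6 and #7 (below) differ mainly in update_function, which selects the formula for the conjugate-direction coefficient beta. A standalone sketch of the standard textbook formulas (an illustration, not NeuPy's internal code; g_new and g_old are the current and previous gradient vectors):

import numpy as np

def fletcher_reeves(g_new, g_old):
    # beta = ||g_new||^2 / ||g_old||^2
    return g_new.dot(g_new) / g_old.dot(g_old)

def polak_ribiere(g_new, g_old):
    # beta = g_new . (g_new - g_old) / ||g_old||^2
    return g_new.dot(g_new - g_old) / g_old.dot(g_old)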
Example #7
    def test_conjgrad(self):
        nw = algorithms.ConjugateGradient(self.connection,
                                          step=1,
                                          error='mse',
                                          shuffle_data=True,
                                          verbose=False,
                                          update_function='fletcher_reeves')
        nw.train(simple_input_train, simple_target_train, epochs=100)
        result = nw.predict(simple_input_train)
        norm = np.linalg.norm(result - simple_target_train)
        self.assertAlmostEqual(0.05, norm, places=2)
Example #8
def build_net(n_input, activation=layers.Sigmoid, sizes=[3, 3]):
    net = layers.Input(n_input)
    for size in sizes:
        net = net > activation(size)
    net = net > layers.Linear(1)

    conj = neual.ConjugateGradient(connection=net,
                                   step=0.005,
                                   addons=[neual.LinearSearch],
                                   show_epoch=25)
    return conj
Example #9
    def test_conjgrad(self):
        cgnet = algorithms.ConjugateGradient(
            (10, 5, 1),
            error='binary_crossentropy',
            shuffle_data=True,
            verbose=False,
            update_function='fletcher_reeves',
        )
        x_train, x_test, y_train, y_test = simple_classification()

        cgnet.train(x_train, y_train, x_test, y_test, epochs=50)
        actual_prediction = cgnet.predict(x_test).round().T

        accuracy = metrics.accuracy_score(actual_prediction[0], y_test)
        self.assertAlmostEqual(accuracy, 0.9, places=1)
Example #10
File: test_dan.py Project: disc5/neupy
    def test_dan(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        dan = algorithms.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 10, 1), step=0.1, maxstep=1),
            algorithms.GradientDescent((4, 5, 1), step=0.1),
            algorithms.ConjugateGradient((4, 5, 1), step=0.01),
        ])

        dan.train(x_train, y_train, epochs=500)
        result = dan.predict(x_test)
        ensemble_result = metrics.accuracy_score(y_test, result)
        self.assertAlmostEqual(0.9222, ensemble_result, places=4)
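DynamicallyAveragedNetwork combines its member networks by weighting each output according to how confident that member is. A hedged sketch of certainty-weighted averaging in that spirit (an illustration only; NeuPy's implementation may differ in detail):

import numpy as np

def dan_combine(outputs):
    # Weight each member's sigmoid output by its distance from the
    # 0.5 decision boundary, then take the weighted average.
    outputs = np.asarray(outputs)          # shape: (n_members, n_samples)
    certainty = np.abs(outputs - 0.5)
    weights = certainty / (certainty.sum(axis=0) + 1e-12)
    return (weights * outputs).sum(axis=0)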
Example #11
def run_neural_net(connection, data):
    dataset = data
    data, target = dataset.data, dataset.target

    data_scaler = preprocessing.MinMaxScaler()
    target_scaler = preprocessing.MinMaxScaler()

    data = data_scaler.fit_transform(data)
    target = target_scaler.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection,
        search_method='golden',
        show_epoch=5,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    time_start = time.time()
    cgnet.train(x_train, y_train, x_test, y_test, epochs=50)
    time_end = time.time()

    y_predict = cgnet.predict(x_test).round(1)
    error = mae(target_scaler.inverse_transform(y_test),
                target_scaler.inverse_transform(y_predict))

    return [time_end - time_start, error]
Example #12
	def select_algorithm(self, algorithm, options=None):
		try:
			# LevenbergMarquardt acts as the default network
			self.network = algorithms.LevenbergMarquardt(self.layers)
			opt = options
			print("Selected optimizer: " + str(algorithm))
		except RecursionError:
			print("Recursion problem")
			return None

		if algorithm == 'GradientDescent':
			self.network = algorithms.GradientDescent(self.layers)
		elif algorithm == 'LevenbergMarquardt':
			self.network = algorithms.LevenbergMarquardt(connection=self.layers, mu=opt[0], mu_update_factor=opt[1])
		elif algorithm == 'Adam':
			self.network = algorithms.Adam(self.layers)
		elif algorithm == 'QuasiNewton':
			self.network = algorithms.QuasiNewton(self.layers)
		elif algorithm == 'Quickprop':
			self.network = algorithms.Quickprop(self.layers)
		elif algorithm == 'MinibatchGradientDescent':
			self.network = algorithms.MinibatchGradientDescent(self.layers)
		elif algorithm == 'ConjugateGradient':
			self.network = algorithms.ConjugateGradient(self.layers)
		elif algorithm == 'Hessian':
			self.network = algorithms.Hessian(self.layers)
		elif algorithm == 'HessianDiagonal':
			self.network = algorithms.HessianDiagonal(self.layers)
		elif algorithm == 'Momentum':
			self.network = algorithms.Momentum(self.layers)
		elif algorithm == 'RPROP':
			self.network = algorithms.RPROP(self.layers)
		elif algorithm == 'IRPROPPlus':
			self.network = algorithms.IRPROPPlus(self.layers)
		elif algorithm == 'Adadelta':
			self.network = algorithms.Adadelta(self.layers)
		elif algorithm == 'Adagrad':
			self.network = algorithms.Adagrad(self.layers)
		elif algorithm == 'RMSProp':
			self.network = algorithms.RMSProp(self.layers)
		elif algorithm == 'Adamax':
			self.network = algorithms.Adamax(self.layers)
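The chain of if-statements above can be collapsed into a dictionary of constructors. A sketch of the same dispatch (build_network is a hypothetical name; entries abbreviated):

OPTIMIZERS = {
    'GradientDescent': algorithms.GradientDescent,
    'LevenbergMarquardt': algorithms.LevenbergMarquardt,
    'Adam': algorithms.Adam,
    'ConjugateGradient': algorithms.ConjugateGradient,
    # ... the remaining algorithms follow the same pattern
}

def build_network(layer_structure, algorithm):
    # Fall back to LevenbergMarquardt for unknown names, mirroring
    # the default in select_algorithm above.
    constructor = OPTIMIZERS.get(algorithm, algorithms.LevenbergMarquardt)
    return constructor(layer_structure)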
Example #13
def run_neural_net():

    import_modules()

    dataset = datasets.load_boston()
    data, target = dataset.data, dataset.target

    data_scaler = preprocessing.MinMaxScaler()
    target_scaler = preprocessing.MinMaxScaler()

    data = data_scaler.fit_transform(data)
    target = target_scaler.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection=[
            layers.Input(13),
            layers.Sigmoid(75),
            layers.Sigmoid(25),
            layers.Sigmoid(1),
        ],
        search_method='golden',
        show_epoch=1,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    cgnet.train(x_train, y_train, x_test, y_test, epochs=30)

    plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    error = rmsle(target_scaler.inverse_transform(y_test),
                  target_scaler.inverse_transform(y_predict))

    return error
Example #14
    def test_linear_search(self):
        methods = [
            ('golden', 0.34202),
            ('brent', 0.34942),
        ]

        for method_name, valid_error in methods:
            np.random.seed(self.random_seed)

            dataset = datasets.load_boston()
            data, target = dataset.data, dataset.target

            data_scaler = preprocessing.MinMaxScaler()
            target_scaler = preprocessing.MinMaxScaler()

            x_train, x_test, y_train, y_test = train_test_split(
                data_scaler.fit_transform(data),
                target_scaler.fit_transform(target.reshape(-1, 1)),
                train_size=0.85)

            cgnet = algorithms.ConjugateGradient(
                connection=[
                    layers.Input(13),
                    layers.Sigmoid(50),
                    layers.Sigmoid(1),
                ],
                show_epoch=1,
                verbose=False,
                search_method=method_name,
                tol=0.1,
                addons=[algorithms.LinearSearch],
            )
            cgnet.train(x_train, y_train, epochs=4)
            y_predict = cgnet.predict(x_test).round(1)

            error = rmsle(target_scaler.inverse_transform(y_test),
                          target_scaler.inverse_transform(y_predict))

            self.assertAlmostEqual(valid_error, error, places=5)
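Both 'golden' and 'brent' name classic one-dimensional minimizers that LinearSearch uses to pick the step length along the search direction. A self-contained sketch of golden-section search (an illustration of the idea, not NeuPy's code):

import math

def golden_section_search(f, a, b, tol=0.1):
    # Repeatedly shrink [a, b] around the minimum of f using two
    # golden-ratio probe points per iteration.
    inv_phi = (math.sqrt(5) - 1) / 2
    c = b - inv_phi * (b - a)
    d = a + inv_phi * (b - a)
    while abs(b - a) > tol:
        if f(c) < f(d):
            b = d
        else:
            a = c
        c = b - inv_phi * (b - a)
        d = a + inv_phi * (b - a)
    return (a + b) / 2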
Example #15
    def go(self):
        raw = self.datafile.read().splitlines()

        data = self._prepare_data(raw[::2])
        target = self._prepare_target(raw[1::2])
        print(len(data))
        print(len(target))

        environment.reproducible()

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.85)

        print(x_train[0])
        connections = [
            layers.Input(100),
            layers.Linear(200),
            layers.Sigmoid(150),
            layers.Sigmoid(5),
        ]

        cgnet = algorithms.ConjugateGradient(
            connection=connections,
            search_method='golden',
            show_epoch=25,
            verbose=True,
            addons=[algorithms.LinearSearch],
        )

        cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
        plots.error_plot(cgnet)

        y_predict = cgnet.predict(x_test).round(1)
        error = rmsle(y_test, y_predict)
        print(error)

        with open('lib/net/base_searcher.pickle', 'wb') as f:
            pickle.dump(cgnet, f)
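A hypothetical companion for later runs: reload the pickled network and score new data (new_data stands in for whatever is being predicted):

import pickle

# Reload the network that go() saved above and reuse it.
with open('lib/net/base_searcher.pickle', 'rb') as f:
    cgnet = pickle.load(f)
predictions = cgnet.predict(new_data)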
Example #16
    def test_ensemble(self):
        data, target = datasets.make_classification(300,
                                                    n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.7)

        dan = algorithms.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 5, 1), step=0.1, maxstep=1),
            algorithms.GradientDescent((4, 5, 1), step=0.1),
            algorithms.ConjugateGradient((4, 5, 1), step=0.01),
        ])

        pipeline = Pipeline([
            ('standard_scaler', preprocessing.StandardScaler()),
            ('dan', dan),
        ])
        pipeline.fit(x_train, y_train, dan__epochs=100)

        result = pipeline.predict(x_test)
        ensemble_result = metrics.accuracy_score(y_test, result)
        self.assertAlmostEqual(0.9222, ensemble_result, places=4)
Example #17
data_scaler = preprocessing.MinMaxScaler()
target_scaler = preprocessing.MinMaxScaler()

data = data_scaler.fit_transform(data)
target = target_scaler.fit_transform(target)

x_train, x_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    train_size=0.85)

cgnet = algorithms.ConjugateGradient(
    connection=[
        layers.SigmoidLayer(13),
        layers.SigmoidLayer(50),
        layers.OutputLayer(1),
    ],
    search_method='golden',
    show_epoch=25,
    verbose=True,
    optimizations=[algorithms.LinearSearch],
)

cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
cgnet.plot_errors()

y_predict = cgnet.predict(x_test).round(1)
error = rmsle(target_scaler.inverse_transform(y_test),
              target_scaler.inverse_transform(y_predict))
print("RMSLE = {}".format(error))
x_train, x_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    train_size=0.85)

# Creating the neural network
#   connection:
#       Input features: lat, lon, year, bdrms, full baths, half baths,
#           square feet, residential, condo, year built (10)
#       Size of hidden layer. Currently arbitrarily set to 50
#       Size of output values. Currently bldg_price and land_price (2)
cgnet = algorithms.ConjugateGradient(
    connection=[
        layers.Input(10),
        layers.Sigmoid(50),
        layers.Sigmoid(2),
    ],
    search_method='golden',
    show_epoch=25,
    verbose=True,
    addons=[algorithms.LinearSearch],
)

# Train neural net
cgnet.train(x_train, y_train, x_test, y_test, epochs=100)

# Make predictions
y_predict = cgnet.predict(test)

# write values to csv
#   lat,lon,year,bdrms,fbath,hbath,sf,res,condo,built
with open('predict13-18.csv', 'w') as myfile:
Example #19
# x_train, x_test, y_train, y_test = train_test_split(
# 	data_scaler.fit_transform(data),
# 	target_scaler.fit_transform(target.reshape(-1, 1)),
# 	test_size=0.15
# 	)


# Polak-Ribiere conjugate gradient
optimizer = algorithms.ConjugateGradient(
	network=[
	layers.Input(4),
	layers.Softmax(3)
	],
	update_function='polak_ribiere',
	loss='categorical_crossentropy',
	verbose=True,
	show_epoch=1
	)

# quasi-Newton (DFP) alternative
# optimizer = algorithms.QuasiNewton(
# 	network=[
# 	layers.Input(4),
# 	layers.Softmax(3)
# 	],
# 	update_function='dfp',
# 	loss='categorical_crossentropy',
# 	verbose=True,
# 	show_epoch=1
# 	)
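A hypothetical follow-up (the train/test arrays are assumptions) to train the optimizer defined above and score new samples:

optimizer.train(x_train, y_train, epochs=100)
y_predicted = optimizer.predict(x_test).argmax(axis=-1)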