Example #1
    def test_handle_errors(self):
        data, target = datasets.make_classification(300,
                                                    n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = model_selection.train_test_split(
            data, target, test_size=0.3)

        with self.assertRaises(ValueError):
            # The first network has two output units and
            # the second just one.
            algorithms.DynamicallyAveragedNetwork([
                algorithms.RPROP((4, 10, 2), step=0.1),
                algorithms.GradientDescent((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # An ensemble with fewer than two networks is invalid
            algorithms.DynamicallyAveragedNetwork(
                [algorithms.GradientDescent((4, 10, 1), step=0.1)])

        with self.assertRaises(ValueError):
            # The Relu output is not bounded to the [0, 1] range DAN expects
            dan = algorithms.DynamicallyAveragedNetwork([
                algorithms.GradientDescent(
                    [
                        Input(4),
                        Sigmoid(10),
                        Relu(1, weight=init.Uniform(), bias=init.Uniform()),
                    ],
                    step=0.01,
                ),
                algorithms.RPROP((4, 10, 1), step=0.01),
            ])
            dan.train(x_train, y_train, epochs=10)
            dan.predict(x_test)
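
For contrast with the failing cases above, here is a construction that passes the same checks: at least two member networks, each with a single-unit output in the [0, 1] range. This is only a sketch mirroring Example #8 further down; the layer sizes are arbitrary:

# Minimal valid DAN construction (sketch; layer sizes are assumptions).
dan = algorithms.DynamicallyAveragedNetwork([
    algorithms.RPROP((4, 10, 1), step=0.1),
    algorithms.GradientDescent((4, 10, 1), step=0.1),
])
dan.train(x_train, y_train, epochs=10)
result = dan.predict(x_test)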
Example #2
    def test_handle_errors(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        with self.assertRaises(ValueError):
            # The first network has two output units and
            # the second just one.
            ensemble.DynamicallyAveragedNetwork([
                algorithms.RPROP((4, 10, 2), step=0.1),
                algorithms.Backpropagation((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # An ensemble with fewer than two networks is invalid
            ensemble.DynamicallyAveragedNetwork([
                algorithms.Backpropagation((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # Tanh output lies in (-1, 1), not the [0, 1] range DAN expects
            dan = ensemble.DynamicallyAveragedNetwork([
                algorithms.Backpropagation(
                    SigmoidLayer(4) > TanhLayer(10) > OutputLayer(1),
                    step=0.01
                ),
                algorithms.RPROP((4, 10, 1), step=0.1)
            ])
            dan.train(x_train, y_train, epochs=10)
            dan.predict(x_test)
Example #3
    def test_irpropplus(self):
        options = dict(minstep=0.001,
                       maxstep=1,
                       increase_factor=1.1,
                       decrease_factor=0.1,
                       step=1,
                       verbose=False)

        uniform = init.Uniform()
        params1 = dict(
            weight=uniform.sample((3, 10), return_array=True),
            bias=uniform.sample((10, ), return_array=True),
        )
        params2 = dict(
            weight=uniform.sample((10, 2), return_array=True),
            bias=uniform.sample((2, ), return_array=True),
        )

        network = layers.join(
            Input(3),
            Sigmoid(10, **params1),
            Sigmoid(2, **params2),
        )

        nw = algorithms.IRPROPPlus(copy.deepcopy(network), **options)
        nw.train(simple_x_train, simple_y_train, epochs=100)
        irprop_plus_error = nw.errors.train[-1]
        self.assertGreater(1e-4, nw.errors.train[-1])

        nw = algorithms.RPROP(copy.deepcopy(network), **options)
        nw.train(simple_x_train, simple_y_train, epochs=100)
        rprop_error = nw.errors.train[-1]
        self.assertGreater(rprop_error, irprop_plus_error)
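
Both trainers in the test above share the same step-adaptation knobs (minstep, maxstep, increase_factor, decrease_factor). A minimal numpy sketch of the sign-based update rule those parameters control; this is not NeuPy code, and the array names are assumptions:

import numpy as np

def rprop_update_steps(steps, grad_prev, grad,
                       increase_factor=1.1, decrease_factor=0.1,
                       minstep=0.001, maxstep=1.0):
    # Grow the per-parameter step where the gradient kept its sign,
    # shrink it where the sign flipped, then clip to [minstep, maxstep].
    sign = np.sign(grad_prev * grad)
    steps = np.where(sign > 0, steps * increase_factor, steps)
    steps = np.where(sign < 0, steps * decrease_factor, steps)
    return np.clip(steps, minstep, maxstep)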
Example #4
    def test_rprop(self):
        nw = algorithms.RPROP(self.network,
                              minstep=0.001,
                              maxstep=1,
                              increase_factor=1.1,
                              decrease_factor=0.1,
                              step=1,
                              verbose=False)

        nw.train(simple_x_train, simple_y_train, epochs=100)
        self.assertGreater(1e-4, nw.errors.train[-1])
Example #5
    def test_rprop(self):
        nw = algorithms.RPROP(self.connection,
                              minstep=0.001,
                              maxstep=1,
                              increase_factor=1.1,
                              decrease_factor=0.1,
                              step=1,
                              verbose=False)

        nw.train(simple_input_train, simple_target_train, epochs=100)
        self.assertGreater(1e-4, nw.errors.last())
Example #6
    def test_rprop(self):
        nw = algorithms.RPROP(self.connection,
                              minimum_step=0.001,
                              maximum_step=1,
                              increase_factor=1.1,
                              decrease_factor=0.1,
                              step=1,
                              use_raw_predict_at_error=True,
                              verbose=False)

        nw.train(simple_input_train, simple_target_train, epochs=100)
        self.assertGreater(1e-4, nw.last_error_in())
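
Examples #4-#6 are the same test across NeuPy versions, and all of them rely on fixtures defined elsewhere in the suite (self.network / self.connection and the simple_*_train arrays). A self-contained stand-in using the newer API from Example #3; the toy data and layer sizes below are assumptions, not the suite's actual fixtures:

import numpy as np
from neupy import algorithms, layers

# Hypothetical stand-ins for the suite's fixtures.
network = layers.join(
    layers.Input(3),
    layers.Sigmoid(10),
    layers.Sigmoid(1),
)
simple_x_train = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
simple_y_train = np.array([[0], [1], [1], [0]])

nw = algorithms.RPROP(network, minstep=0.001, maxstep=1, step=1, verbose=False)
nw.train(simple_x_train, simple_y_train, epochs=100)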
Example #7
    def test_handle_errors(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        with self.assertRaises(ValueError):
            # The first network has two output units and
            # the second just one.
            algorithms.DynamicallyAveragedNetwork([
                algorithms.RPROP((4, 10, 2), step=0.1),
                algorithms.GradientDescent((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # An ensemble with fewer than two networks is invalid
            algorithms.DynamicallyAveragedNetwork([
                algorithms.GradientDescent((4, 10, 1), step=0.1)
            ])

        with self.assertRaises(ValueError):
            # The network output is not bounded to the [0, 1] range DAN expects
            dan = algorithms.DynamicallyAveragedNetwork([
                algorithms.GradientDescent(
                    [
                        Sigmoid(4),
                        Relu(10, init_method='bounded', bounds=(0, 1)),
                        Output(1),
                    ],
                    step=0.01
                ),
                algorithms.RPROP((4, 10, 1), step=0.01),
            ])
            dan.train(x_train, y_train, epochs=10)
            dan.predict(x_test)
Example #8
    def test_dan(self):
        data, target = datasets.make_classification(300, n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            data, target, train_size=0.7
        )

        dan = algorithms.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 10, 1), step=0.1, maxstep=1),
            algorithms.GradientDescent((4, 5, 1), step=0.1),
            algorithms.ConjugateGradient((4, 5, 1), step=0.01),
        ])

        dan.train(x_train, y_train, epochs=500)
        result = dan.predict(x_test)
        ensemble_result = metrics.accuracy_score(y_test, result)
        self.assertAlmostEqual(0.9222, ensemble_result, places=4)
Example #9
    def test_irpropplus(self):
        options = dict(minimum_step=0.001,
                       maximum_step=1,
                       increase_factor=1.1,
                       decrease_factor=0.1,
                       step=1,
                       verbose=False)
        nw = algorithms.IRPROPPlus(copy.deepcopy(self.connection), **options)

        nw.train(simple_input_train, simple_target_train, epochs=100)
        irprop_plus_error = nw.last_error_in()
        self.assertGreater(1e-4, nw.last_error_in())

        nw = algorithms.RPROP(copy.deepcopy(self.connection), **options)

        nw.train(simple_input_train, simple_target_train, epochs=100)
        rprop_error = nw.last_error_in()
        self.assertGreater(rprop_error, irprop_plus_error)
Example #10
	def select_algorithm(self, algorithm, options=None):
		try:
			# Default to Levenberg-Marquardt; the branches below may replace it.
			self.network = algorithms.LevenbergMarquardt(self.layers)
			opt = options
			if opt is not None:
				print(opt[1])
			print("Selected optimizer: " + str(algorithm))
		except RecursionError:
			print("Recursion problem")
			return None

		if algorithm == 'GradientDescent':
			self.network = algorithms.GradientDescent(self.layers)
		elif algorithm == 'LevenbergMarquardt':
			self.network = algorithms.LevenbergMarquardt(connection=self.layers, mu=opt[0], mu_update_factor=opt[1])
		elif algorithm == 'Adam':
			self.network = algorithms.Adam(self.layers)
		elif algorithm == 'QuasiNewton':
			self.network = algorithms.QuasiNewton(self.layers)
		elif algorithm == 'Quickprop':
			self.network = algorithms.Quickprop(self.layers)
		elif algorithm == 'MinibatchGradientDescent':
			self.network = algorithms.MinibatchGradientDescent(self.layers)
		elif algorithm == 'ConjugateGradient':
			self.network = algorithms.ConjugateGradient(self.layers)
		elif algorithm == 'Hessian':
			self.network = algorithms.Hessian(self.layers)
		elif algorithm == 'HessianDiagonal':
			self.network = algorithms.HessianDiagonal(self.layers)
		elif algorithm == 'Momentum':
			self.network = algorithms.Momentum(self.layers)
		elif algorithm == 'RPROP':
			self.network = algorithms.RPROP(self.layers)
		elif algorithm == 'IRPROPPlus':
			self.network = algorithms.IRPROPPlus(self.layers)
		elif algorithm == 'Adadelta':
			self.network = algorithms.Adadelta(self.layers)
		elif algorithm == 'Adagrad':
			self.network = algorithms.Adagrad(self.layers)
		elif algorithm == 'RMSProp':
			self.network = algorithms.RMSProp(self.layers)
		elif algorithm == 'Adamax':
			self.network = algorithms.Adamax(self.layers)
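
The if/elif chain above can also be written as a dispatch table. A sketch of that design choice, assuming every optimizer except LevenbergMarquardt takes only the layer connection; the OPTIMIZERS mapping and this variant of select_algorithm are illustrative, not part of the original class:

# Hypothetical dispatch-table variant of select_algorithm.
OPTIMIZERS = {
    'GradientDescent': algorithms.GradientDescent,
    'Adam': algorithms.Adam,
    'QuasiNewton': algorithms.QuasiNewton,
    'Quickprop': algorithms.Quickprop,
    'MinibatchGradientDescent': algorithms.MinibatchGradientDescent,
    'ConjugateGradient': algorithms.ConjugateGradient,
    'Hessian': algorithms.Hessian,
    'HessianDiagonal': algorithms.HessianDiagonal,
    'Momentum': algorithms.Momentum,
    'RPROP': algorithms.RPROP,
    'IRPROPPlus': algorithms.IRPROPPlus,
    'Adadelta': algorithms.Adadelta,
    'Adagrad': algorithms.Adagrad,
    'RMSProp': algorithms.RMSProp,
    'Adamax': algorithms.Adamax,
}

def select_algorithm(self, algorithm, options=None):
    # Only LevenbergMarquardt takes extra options in the original code.
    if algorithm == 'LevenbergMarquardt':
        self.network = algorithms.LevenbergMarquardt(
            connection=self.layers, mu=options[0], mu_update_factor=options[1])
    elif algorithm in OPTIMIZERS:
        self.network = OPTIMIZERS[algorithm](self.layers)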
Example #11
    def test_irpropplus(self):
        options = dict(minstep=0.001,
                       maxstep=1,
                       increase_factor=1.1,
                       decrease_factor=0.1,
                       step=1,
                       verbose=False)
        connection = [
            Input(3),
            Sigmoid(10, init_method='bounded', bounds=(0, 1)),
            Sigmoid(2, init_method='bounded', bounds=(0, 1)),
        ]

        nw = algorithms.IRPROPPlus(copy.deepcopy(connection), **options)
        nw.train(simple_input_train, simple_target_train, epochs=100)
        irprop_plus_error = nw.errors.last()
        self.assertGreater(1e-4, nw.errors.last())

        nw = algorithms.RPROP(copy.deepcopy(connection), **options)
        nw.train(simple_input_train, simple_target_train, epochs=100)
        rprop_error = nw.errors.last()
        self.assertGreater(rprop_error, irprop_plus_error)
Example #12
    def test_ensemble(self):
        data, target = datasets.make_classification(300,
                                                    n_features=4,
                                                    n_classes=2)
        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.7)

        dan = algorithms.DynamicallyAveragedNetwork([
            algorithms.RPROP((4, 5, 1), step=0.1, maxstep=1),
            algorithms.GradientDescent((4, 5, 1), step=0.1),
            algorithms.ConjugateGradient((4, 5, 1), step=0.01),
        ])

        pipeline = Pipeline([
            ('standard_scaler', preprocessing.StandardScaler()),
            ('dan', dan),
        ])
        pipeline.fit(x_train, y_train, dan__epochs=100)

        result = pipeline.predict(x_test)
        ensemble_result = metrics.accuracy_score(y_test, result)
        self.assertAlmostEqual(0.9222, ensemble_result, places=4)
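
The dan__epochs keyword in pipeline.fit above is scikit-learn's step-prefixed fit-parameter routing: the value is forwarded to the step named 'dan'. A rough hand-rolled equivalent, reusing the names from the test for illustration only:

# Equivalent to the pipeline fit/predict above, written out by hand (sketch).
scaler = preprocessing.StandardScaler()
dan.train(scaler.fit_transform(x_train), y_train, epochs=100)
result = dan.predict(scaler.transform(x_test))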
Example #13
logger.info("Loading word embedding NN")
word2vec = WordEmbeddingNN.load(WORD_EMBEDDING_NN)

prepare_data_pipeline = Pipeline([
    ('tokenize_texts', TokenizeText(ignore_stopwords=False)),
    ('ignore_unknown_words', IgnoreUnknownWords(dictionary=word2vec.vocab)),
    ('word_embedding', word2vec),
])

classifier = algorithms.RPROP(
    [
        layers.Relu(100),
        layers.Relu(200),
        layers.Sigmoid(50),
        layers.RoundedOutput(1),
    ],
    error='binary_crossentropy',
    verbose=True,
    shuffle_data=True,
    maxstep=1,
    minstep=1e-7,
)

logger.info("Preparing train data")
train_data = data[data.type == 'train']
texts = train_data.text.values
x_train = prepare_data_pipeline.transform(texts)
y_train = (train_data.sentiment.values == 'pos')

logger.info("Preparing test data")
test_data = data[data.type == 'test']
Example #14
# the first 3067 of the 4601 samples are used for training
x_train = X[0:3067]
y_train = Y[0:3067]

# the remaining 1534 samples are used for validation
x_test = X[3067:]
y_test = Y[3067:]

# build the neural network with a 57-7-1 architecture
rpropnet = algorithms.RPROP(
    [
        layers.Input(57),
        layers.Sigmoid(7),
        layers.Sigmoid(1),
    ],
    error='mse',
    verbose=True,
    shuffle_data=True,
    maxstep=1,
    minstep=1e-7,
)

# train the network
rpropnet.train(input_train=x_train, target_train=y_train, epochs=200)

# plot the errors made during training
plots.error_plot(rpropnet)

y_train_predicted = rpropnet.predict(x_train).round()
y_test_predicted = rpropnet.predict(x_test).round()
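
A natural follow-up for this spam example is to score the rounded predictions. The snippet below is not part of the original code; it assumes Y holds 0/1 labels and uses scikit-learn's accuracy_score:

from sklearn import metrics

# Hypothetical evaluation step; y_train / y_test come from the split above.
print("train accuracy:", metrics.accuracy_score(y_train, y_train_predicted))
print("test accuracy:", metrics.accuracy_score(y_test, y_test_predicted))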