Example #1
import unittest

# Project-local imports; the module path is an assumption, adjust to the
# actual package layout.
from nn import NN, MultilayerPerceptron, ErrorCalculator, batch, sigmoid, relu


class TestNNBoolFunc(unittest.TestCase):
    def setUp(self):
        self.nn = NN(seed=0,
                     learning_algorithm=batch,
                     error_calculator=ErrorCalculator.MIS,  # misclassification count
                     architecture=MultilayerPerceptron(
                         2,  # hidden layer size(s)
                         activation=sigmoid,
                         activation_hidden=relu,
                         alambd=0,   # no regularisation
                         alpha=0.9,  # momentum
                         eta=0.9,    # learning rate
                     ))

    def test_and(self):
        self.try_data([
            ([0, 0], [0]),
            ([0, 1], [0]),
            ([1, 0], [0]),
            ([1, 1], [1]),
        ])

    def test_or(self):
        self.try_data([
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [1]),
        ])

    def test_xor(self):
        self.try_data([
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ])

    def try_data(self, data):
        # set() is assumed to re-initialise the weights, so each test
        # starts from a fresh network.
        self.nn.set()

        # Train until every pattern is classified correctly, i.e. the
        # misclassification error reaches zero.
        while True:
            self.nn.fit(data)
            error = self.nn.compute_error(data)
            print(error)
            if error == 0:
                break

        self.assertEqual(self.nn.compute_error(data), 0)
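
The class defines no entry point of its own; a minimal sketch of one, assuming the example lives in a file that is run directly:

if __name__ == '__main__':
    unittest.main()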
Example #2
    def test_monk1(self):
        nn = NN(seed=4,
                epochs_limit=400,
                learning_algorithm=batch,
                error_calculator=ErrorCalculator.MSE,
                architecture=MultilayerPerceptron(
                    4,  # hidden layer size(s)
                    activation=sigmoid,
                    activation_hidden=relu,
                    eta=0.5,
                    alambd=0,
                    alpha=0.8,
                ))

        train_data, test_data = read_monk(1)

        nn.fit(train_data)

        # Per-epoch misclassification curves, computed here for inspection;
        # they are not used by the assertions below.
        train_errs = nn.compute_learning_curve(train_data, ErrorCalculator.MIS)
        test_errs = nn.compute_learning_curve(test_data, ErrorCalculator.MIS)

        # Count misclassified patterns: the output is rounded to the nearest
        # class, so each mismatch against the 0/1 target contributes exactly 1.
        error_train = 0
        for x, d in train_data:
            error_train += (round(nn(x)[0][-1]) - d[0])**2

        error_test = 0
        for x, d in test_data:
            error_test += (round(nn(x)[0][-1]) - d[0])**2

        print(f'train: {(len(train_data) - error_train) / len(train_data) * 100}%')
        print(f'test:  {(len(test_data) - error_test) / len(test_data) * 100}%')

        self.assertEqual(error_train, 0)
        self.assertEqual(error_test, 0)

        # The same check through the library's misclassification metric.
        nn.error_calculator = ErrorCalculator.MIS
        self.assertEqual(nn.compute_error(train_data), 0)
        self.assertEqual(nn.compute_error(test_data), 0)
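
read_monk is project-specific and not shown in this snippet. Below is a minimal sketch of what such a loader could look like, assuming the raw UCI MONK's files (each row: class label, six categorical attributes, an id) and a one-hot input encoding; the file names, encoding, and helper names are assumptions, not the project's actual implementation.

from typing import List, Tuple

# Number of possible values for each MONK attribute a1..a6 in the UCI data set.
MONK_CARDINALITIES = (3, 3, 2, 3, 4, 2)

def _read_monk_file(path: str) -> List[Tuple[List[int], List[int]]]:
    patterns = []
    with open(path) as f:
        for line in f:
            fields = line.split()
            if not fields:
                continue
            target = [int(fields[0])]              # class label: 0 or 1
            attrs = [int(v) for v in fields[1:7]]  # six categorical attributes
            x: List[int] = []
            for value, cardinality in zip(attrs, MONK_CARDINALITIES):
                one_hot = [0] * cardinality
                one_hot[value - 1] = 1             # attribute values are 1-based
                x.extend(one_hot)
            patterns.append((x, target))
    return patterns

def read_monk(n: int):
    return (_read_monk_file(f'monks-{n}.train'),
            _read_monk_file(f'monks-{n}.test'))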
Example #3
    # The snippet starts mid-call; eta, alpha, alambd and the data sets are
    # assumed to come from the enclosing scope.
    nn = NN(
        n_init=1,
        error_calculator=ErrorCalculator.MSE,
        architecture=MultilayerPerceptron(
            size_hidden_layers=(2, 2),
            eta=eta,
            alpha=alpha,
            alambd=alambd,
            activation=tanh_classification,
            activation_hidden=relu,
        ),
    )

    nn.fit(train_set)

    nn.error_calculator = ErrorCalculator.MSE
    print('mse', nn.compute_error(train_set), nn.compute_error(validation_set),
          nn.compute_error(test_data))

    nn.error_calculator = ErrorCalculator.MEE
    print('mee', nn.compute_error(train_set), nn.compute_error(validation_set),
          nn.compute_error(test_data))

    nn.error_calculator = ErrorCalculator.ACC
    print('acc', nn.compute_error(train_set), nn.compute_error(validation_set),
          nn.compute_error(test_data))

    # Per-epoch MSE learning curves for the three splits.
    nn.error_calculator = ErrorCalculator.MSE
    training_curve = nn.compute_learning_curve(train_set)
    validation_curve = nn.compute_learning_curve(validation_set)
    testing_curve = nn.compute_learning_curve(test_data)
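
A natural next step is to visualise the three curves. A minimal sketch, assuming each curve is a plain sequence of per-epoch floats and that matplotlib is available (labels are illustrative):

import matplotlib.pyplot as plt

plt.plot(training_curve, label='training')
plt.plot(validation_curve, label='validation')
plt.plot(testing_curve, label='test')
plt.xlabel('epoch')
plt.ylabel('MSE')
plt.legend()
plt.show()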