def test_forward_propagation(self):
        """Train a small network on the iris dataset and print predictions.

        NOTE(review): this is a smoke test of the train/predict loop; it
        prints desired vs. actual outputs for manual inspection and does
        not assert on prediction quality.
        """
        # given: a 4-input network with one hidden sigmoid layer and a
        # single linear output neuron
        network = Network(4)
        network.add_layer(3, Sigmoid)
        network.add_layer(1, Linear)

        iris = load_iris()
        X = iris.data
        y = iris.target
        # Hold out 20% of the examples for the printout below.
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.2,
                                                            shuffle=True)

        # when: plain per-example (online) training for 100 epochs
        for epoch in range(100):
            for example_n in range(y_train.size):
                network.set_inputs(X_train[example_n])
                network.set_desired_outputs([y_train[example_n]])
                network.forward_propagate()
                network.backward_propagate()

        # then: show desired vs. actual outputs on the held-out set
        for i in range(y_test.size):
            network.set_inputs(X_test[i])
            network.set_desired_outputs([y_test[i]])

            network.forward_propagate()

            print("desired output: ", y_test[i])
            print("actual output:", network.get_outputs(), "\n")

        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual. The trivial assertion is kept so the test
        # "passes" exactly as before.
        self.assertEqual(1, 1)
# --- Example #2 ---
class TestNetworkBackwardPropagation(unittest.TestCase):
    """Trains on two iris classes and counts correct test predictions."""

    def test_backward_propagate(self):
        """Online-train the network, then evaluate accuracy on a hold-out set.

        NOTE(review): prediction quality depends on the random train/test
        split, so only sanity bounds on the correct-prediction count are
        asserted; per-example results are printed for manual inspection.
        """
        # given: a 4-input network, one hidden sigmoid layer, sigmoid output
        self.network = Network(inputs_n=4)
        self.network.add_layer(Layer(neurons_n=3, activation_f=Sigmoid))
        self.network.add_layer(Layer(neurons_n=1, activation_f=Sigmoid))
        self.network.compile()

        iris = load_iris()
        X = iris.data
        y = iris.target
        # Only the first 100 rows are used so the problem is binary
        # (iris classes 0 and 1), matching the single sigmoid output.
        X_train, X_test, y_train, y_test = \
            train_test_split(X[:100], y[:100], test_size=0.2, shuffle=True)

        # when: per-example (online) training
        for epoch in range(112):
            for i in range(y_train.size):
                inputs_x = X_train[i].T  # one input example, as a column
                desired_output = y_train[i]
                self.network.set_inputs(np.reshape(inputs_x, (4, 1)))
                self.network.set_desired_outputs(
                    np.reshape(desired_output, (1, 1)))
                self.network.forward_propagate()
                self.network.backward_propagate()

        # then: evaluate on the held-out examples
        correct_predictions = 0
        for i in range(y_test.size):
            inputs_x = X_test[i].T  # one input example, as a column
            desired_output = y_test[i]
            self.network.set_inputs(np.reshape(inputs_x, (4, 1)))
            self.network.set_desired_outputs(np.reshape(
                desired_output, (1, 1)))
            self.network.forward_propagate()

            predicted = convert_output_to_prediction(
                self.network.get_actual_outputs())
            if predicted == y_test[i]:
                correct_predictions += 1

            print("output predicted: ", self.network.get_actual_outputs())
            print("predicted: ", predicted)
            print("actual: ", y_test[i], "\n")

        print("correct predictions: ", correct_predictions)
        # The original test asserted nothing; at least sanity-check that
        # the count is within the possible range.
        self.assertGreaterEqual(correct_predictions, 0)
        self.assertLessEqual(correct_predictions, y_test.size)
# --- Example #3 ---
from activation_function.Linear import Linear
from activation_function.Sigmoid import Sigmoid
from model.Network import Network

# Demo: build a tiny 2-input network (3 sigmoid hidden neurons, one
# sigmoid output) and show its output before and after a single
# backpropagation step.
network = Network(2)
network.add_layer(3, Sigmoid)
network.add_layer(1, Sigmoid)

# Fix inputs, target, and all weights so the run is fully deterministic.
network.set_inputs([-2, 0])
network.set_desired_outputs([2.4])
network.set_all_weights_to(1)

# Output with the initial (all-ones) weights.
network.forward_propagate()
print("outputs", network.get_outputs())

# One learning step, then re-run the forward pass to see the change.
network.backward_propagate()
network.forward_propagate()
print("outputs", network.get_outputs())