Exemplo n.º 1
0
def mnist_dataset():
    """Train a [64, 60, 10] network on the sklearn digits set and print a report."""
    digits = datasets.load_digits()
    features = preprocessing.scale(digits.data.astype(float))
    targets = to_vector(digits.target, 10)  # one-hot encode the 10 classes
    X_train, X_test, y_train, y_test = train_test_split(features,
                                                        targets,
                                                        test_size=0.4)
    network = NeuralNetwork([64, 60, 10])
    network.back_propagation(X_train, y_train, epochs=15, learning_rate=0.1)
    # Collapse one-hot rows back to class indices before scoring.
    predicted_classes = np.argmax(network.predict(X_test), axis=1).astype(int)
    true_classes = np.argmax(y_test, axis=1).astype(int)
    print(metrics.classification_report(true_classes, predicted_classes))
Exemplo n.º 2
0
def custom_dataset():
    """Train a network on the custom letters dataset and print a report."""
    samples, labels = prepare_data("./Letters", 400, 7800, 26)
    # Hidden layer sized at 80% of the input layer.
    network = NeuralNetwork([400, int(400 * 0.8), 26])
    X_train, X_test, y_train, y_test = train_test_split(samples,
                                                        labels,
                                                        test_size=0.4)
    network.back_propagation(X_train, y_train, epochs=10, learning_rate=0.1)
    # Collapse one-hot rows back to class indices before scoring.
    predicted_classes = np.argmax(network.predict(X_test), axis=1).astype(int)
    true_classes = np.argmax(y_test, axis=1).astype(int)
    report = metrics.classification_report(true_classes,
                                           predicted_classes,
                                           target_names=TARGET_NAMES)
    print(report)
Exemplo n.º 3
0
def learn_function(training_data_x,
                   training_data_y,
                   test_data_x,
                   test_data_y,
                   nn: NeuralNetwork = None):
    """Train a network to approximate a 1-D function and plot its progress.

    Args:
        training_data_x, training_data_y: 1-D arrays of training samples.
        test_data_x, test_data_y: 1-D arrays used for evaluation and plotting.
        nn: optional pre-built network; when ``None`` a default
            relu(1) -> tanh(50) -> sigmoid(20) network with 1 output is built.
    """
    # Reshape every array to a column vector as the scalers/network expect.
    training_data_x = training_data_x.reshape((-1, 1))
    training_data_y = training_data_y.reshape((-1, 1))
    test_data_x = test_data_x.reshape((-1, 1))
    test_data_y = test_data_y.reshape((-1, 1))
    # build neural network
    if nn is None:
        nn = NeuralNetwork([
            NeuralNetworkHiddenLayerInfo('relu', 1),
            NeuralNetworkHiddenLayerInfo('tanh', 50),
            NeuralNetworkHiddenLayerInfo('sigmoid', 20)
        ], 1)
    # preprocess data for nn
    scaler_x = MinMaxScaler((0, 1))
    scaler_y = MinMaxScaler((0, 1))
    scaled_x_train = scaler_x.fit_transform(training_data_x)
    scaled_y_train = scaler_y.fit_transform(training_data_y)
    # BUG FIX: the test inputs must be scaled with the parameters fitted on
    # the TRAINING data (transform), not refitted on the test set
    # (fit_transform) — refitting made train and test scales inconsistent.
    scaled_x_test = scaler_x.transform(test_data_x)
    # learning
    draw = 10000  # plot a progress snapshot every `draw` iterations
    for i in range(100000):
        nn.feed_forward(scaled_x_train)
        nn.back_propagation(scaled_y_train)

        # drawing for test data
        if i % draw == 0:
            pr = nn.predict(scaled_x_test)
            plt.scatter(test_data_x, test_data_y, 14)
            plt.scatter(test_data_x, scaler_y.inverse_transform(pr), 14)
            plt.title(
                f'Iteration: {i} '
                f'Error: {np.square(test_data_y - scaler_y.inverse_transform(nn.output)).mean()}'
            )
            plt.show()

    # Final plots: the target function, then the learned approximation.
    plt.scatter(test_data_x, test_data_y)
    plt.title('Function')
    plt.show()

    pr = nn.predict(scaled_x_test)
    plt.scatter(test_data_x, scaler_y.inverse_transform(pr))
    plt.title(
        f'End, Error: '
        f'{np.square(test_data_y - scaler_y.inverse_transform(pr)).mean()}')
    plt.show()
Exemplo n.º 4
0
    # NOTE(review): this excerpt is truncated — the loop header and the `if`
    # branch that fills `positives` sit above this snippet.
    else:
        negatives.append(inputs[i])
# Pack the positive/negative sample lists into 2xN coordinate arrays
# (row 0 = first coordinate, row 1 = second coordinate of each sample).
positives_array = np.zeros((2, len(positives)))
negatives_array = np.zeros((2, len(negatives)))
for i in range(len(positives)):
    positives_array[0, i] = positives[i][0, 0]
    positives_array[1, i] = positives[i][1, 0]
for i in range(len(negatives)):
    negatives_array[0, i] = negatives[i][0, 0]
    negatives_array[1, i] = negatives[i][1, 0]

# Creating and training the neural network
# presumably NeuralNetwork(inputs, hidden, outputs, learning_rate) — TODO confirm
neural_network = NeuralNetwork(2, 10, 1, 0.03)
costs = np.zeros(num_epochs)
for i in range(num_epochs):
    neural_network.back_propagation(inputs, expected_outputs)
    costs[i] = neural_network.compute_cost(inputs, expected_outputs)
    print('epoch: %d; cost: %f' % (i + 1, costs[i]))

# Plotting cost function convergence
plt.plot(costs)
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.title('Cost Function Convergence')
plt.grid()
plt.savefig('cost_function_convergence.' + fig_format, format=fig_format)

# Plotting positive and negative samples
plt.figure()
plt.plot(positives_array[0, :], positives_array[1, :], '+r')
plt.plot(negatives_array[0, :], negatives_array[1, :], 'x')
Exemplo n.º 5
0
def main():
    """Run a Pong game whose right paddle is driven by a neural network.

    The network is trained online: each frame it receives the ball's
    (x, y, angle), predicts an output, and is immediately back-propagated
    toward the ball's current y. The loop runs until the window is closed.
    """
    # Initialise screen
    pygame.init()
    screen = pygame.display.set_mode((screen_x, screen_y))
    pygame.display.set_caption(
        'Pong Game Controlled by Artificial Neural Network')

    # Fill background
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((255, 255, 255))

    # Initialise players (globals so other module code can reach them)
    global player1
    global player2
    player1 = Player("left")
    player2 = Player("right")

    # Initialise ball
    speed = 13
    rand = ((0.1 * (random.randint(5, 8))))  # NOTE(review): unused — presumably meant as the ball angle
    ball = Ball((0.47, speed))

    # Initialise sprites
    playersprites = pygame.sprite.RenderPlain((player1, player2))
    ballsprite = pygame.sprite.RenderPlain(ball)

    # Blit everything to the screen
    screen.blit(background, (0, 0))
    pygame.display.flip()

    # Initialise clock
    clock = pygame.time.Clock()

    # Initialise NeuralNetwork with random weights
    nn = NeuralNetwork()
    nn.set_random_w()
    input = [0, 0, 0]  # network inputs: ball x, ball y, ball angle (shadows the builtin)
    output = [0]  # network prediction
    t_output = [0]  # training target (the ball's current y)

    # Font Initialization
    pygame.font.init()
    font_title = pygame.font.SysFont('Comic Sans MS', 20)
    font_comment = pygame.font.SysFont('Comic Sans MS', 12)
    text_title = font_title.render("Neural Network Pong", False, (0, 0, 0))
    text_controls = font_comment.render(
        "Keyboard control: 'a' - Player Up, 'y' - Player Down", False,
        (100, 100, 100))

    # Event loop
    while True:
        clock.tick(60)  # cap at 60 frames per second

        for event in pygame.event.get():
            if event.type == QUIT:
                return
            elif event.type == KEYDOWN:
                if event.key == K_a:
                    player1.moveup()
                if event.key == K_z:
                    player1.movedown()
                if event.key == K_UP:
                    player2.moveup()
                if event.key == K_DOWN:
                    player2.movedown()
            elif event.type == KEYUP:
                # Stop a paddle when its control keys are released.
                if event.key == K_a or event.key == K_z:
                    player1.movepos = [0, 0]
                    player1.state = "still"
                if event.key == K_UP or event.key == K_DOWN:
                    player2.movepos = [0, 0]
                    player2.state = "still"

        (x, y) = ball.get_position()
        angle = ball.get_angle()
        (xp, yp) = player2.get_position()

        input[0] = x
        input[1] = y
        input[2] = angle

        # Online training step: forward pass, read prediction, then
        # back-propagate toward the ball's actual y and update the weights.
        nn.set_input(input)
        nn.feed_forward()
        output = nn.get_output()

        t_output[0] = y

        nn.set_delta_output(t_output)

        nn.back_propagation()
        nn.adjust_w()

        # Steer the AI paddle using the network's prediction.
        if output[0] > y:
            player2.moveup()
        else:
            player2.movedown()
        print('Output = {} Position = {}'.format(output[0], x))

        # Redraw: erase moving sprites with background patches, then
        # update and draw everything and flip the display buffer.
        screen.blit(text_title, (230, 0))
        screen.blit(text_controls, (20, 450))
        screen.blit(background, ball.rect, ball.rect)
        screen.blit(background, player1.rect, player1.rect)
        screen.blit(background, player2.rect, player2.rect)
        ballsprite.update()
        playersprites.update()
        ballsprite.draw(screen)
        playersprites.draw(screen)
        pygame.display.flip()
Exemplo n.º 6
0
class NetworkTest(TestCase):
    def setUp(self) -> None:
        """
        Sets up unittest
        """
        self.network = NeuralNetwork(2, [5, 4], 1, [tanh, tanh, sigmoid], 0.1)
        self.x_input = np.array([40, 40])
        self.output = np.array([1.0])
        self.batch_input = np.random.uniform(-50, 50, (2, SIZE))
        self.batch_output = np.random.choice([0.0, 1.0], (1, SIZE))

    def test_constructor_exception(self):
        test = False
        try:
            NeuralNetwork(2, [1, 1], 1, [sigmoid], 0.1)
        except IndexError:
            test = True
        assert test
        net = None
        try:
            net = NeuralNetwork(2, [1, 1], 1, [sigmoid, tanh, tanh, sigmoid],
                                0.1)
        except IndexError:
            test = False
        assert test
        actual = net.feed_forward(self.x_input)
        assert actual.shape == (1, 1)

    def test_layers(self):
        assert len(self.network.layers) == 3
        a_network = NeuralNetwork(2, [], 1, [sigmoid], 0)
        assert len(a_network.layers) == 1
        assert a_network.layers[0].w.shape == (1, 2)
        assert a_network.layers[0].activation_function == sigmoid

    def test_feed_forward(self):
        """Network-level feed_forward must equal feeding each layer in turn."""
        #  As network:
        actual = self.network.feed_forward(self.x_input)
        #  As layers
        expected = self.network.layers[0].feed(self.x_input.reshape(-1, 1),
                                               save=False)
        assert np.equal(expected, self.network.layers[0].output).all()
        expected = self.network.layers[1].feed(expected, save=False)
        assert np.equal(expected, self.network.layers[1].output).all()
        expected = self.network.layers[2].feed(expected, save=False)
        assert np.equal(expected, self.network.layers[2].output).all()
        assert np.equal(expected, actual).all()

    def test_back_propagation(self):
        """Network-level back_propagation must produce the same per-layer
        deltas as manually chaining error * activation-derivative backwards."""
        #  As layers
        hidden_output1 = self.network.layers[-3].feed(self.x_input.reshape(
            -1, 1),
                                                      save=False)
        hidden_output2 = self.network.layers[-2].feed(hidden_output1,
                                                      save=False)
        actual_output = self.network.layers[-1].feed(hidden_output2,
                                                     save=False)
        # Output-layer delta: (prediction - target) * sigmoid'(output).
        error = actual_output - self.output
        expected1 = np.multiply(error,
                                derivative[sigmoid](output=actual_output))
        # Hidden deltas: back-propagated error * tanh'(layer output).
        error = np.dot(self.network.layers[-1].w.T, expected1)
        expected2 = np.multiply(error, derivative[tanh](output=hidden_output2))
        error = np.dot(self.network.layers[-2].w.T, expected2)
        expected3 = np.multiply(error, derivative[tanh](output=hidden_output1))
        #  As network
        self.network.feed_forward(self.x_input)
        self.network.back_propagation(self.output)
        assert np.equal(expected1, self.network.layers[-1].delta).all()
        assert np.equal(expected2, self.network.layers[-2].delta).all()
        assert np.equal(expected3, self.network.layers[-3].delta).all()

    def test_update_weight(self):
        self.network.feed_forward(self.x_input)
        self.network.back_propagation(self.output)
        #  Old weights
        w1 = self.network.layers[0].w
        b1 = self.network.layers[0].b
        w2 = self.network.layers[1].w
        b2 = self.network.layers[1].b
        w3 = self.network.layers[2].w
        b3 = self.network.layers[2].b
        self.network.layers[0].update_weights(self.x_input.reshape(-1, 1), 0.1)
        self.network.layers[1].update_weights(self.network.layers[0].output,
                                              0.1)
        self.network.layers[2].update_weights(self.network.layers[1].output,
                                              0.1)
        expected_list = [
            self.network.layers[0].w, self.network.layers[0].b,
            self.network.layers[1].w, self.network.layers[1].b,
            self.network.layers[2].w, self.network.layers[2].b
        ]
        #  Return to old weights
        self.network.layers[0].w = w1
        self.network.layers[0].b = b1
        self.network.layers[1].w = w2
        self.network.layers[1].b = b2
        self.network.layers[2].w = w3
        self.network.layers[2].b = b3
        self.network.update_weight(self.x_input)
        actual = [
            self.network.layers[0].w, self.network.layers[0].b,
            self.network.layers[1].w, self.network.layers[1].b,
            self.network.layers[2].w, self.network.layers[2].b
        ]
        for index, expected in enumerate(expected_list):
            assert np.equal(actual[index], expected).all()

    def test_feed_forward_batch(self):
        #  As network:
        actual = self.network.feed_forward(self.batch_input)
        #  As layers
        expected = self.network.layers[0].feed(self.batch_input, save=False)
        assert np.equal(expected, self.network.layers[0].output).all()
        expected = self.network.layers[1].feed(expected, save=False)
        assert np.equal(expected, self.network.layers[1].output).all()
        expected = self.network.layers[2].feed(expected, save=False)
        assert np.equal(expected, self.network.layers[2].output).all()
        assert np.equal(expected, actual).all()

    def test_back_propagation_batch(self):
        #  As layers
        hidden_output1 = self.network.layers[-3].feed(self.batch_input,
                                                      save=False)
        hidden_output2 = self.network.layers[-2].feed(hidden_output1,
                                                      save=False)
        actual_output = self.network.layers[-1].feed(hidden_output2,
                                                     save=False)
        error = actual_output - self.batch_output
        expected1 = np.multiply(error,
                                derivative[sigmoid](output=actual_output))
        error = np.dot(self.network.layers[-1].w.T, expected1)
        expected2 = np.multiply(error, derivative[tanh](output=hidden_output2))
        error = np.dot(self.network.layers[-2].w.T, expected2)
        expected3 = np.multiply(error, derivative[tanh](output=hidden_output1))
        #  As network
        self.network.feed_forward(self.batch_input)
        self.network.back_propagation(self.batch_output)
        assert np.equal(expected1, self.network.layers[-1].delta).all()
        assert np.equal(expected2, self.network.layers[-2].delta).all()
        assert np.equal(expected3, self.network.layers[-3].delta).all()

    def test_update_weight_batch(self):
        self.network.feed_forward(self.batch_input)
        self.network.back_propagation(self.batch_output)
        #  Old weights
        w1 = self.network.layers[0].w
        b1 = self.network.layers[0].b
        w2 = self.network.layers[1].w
        b2 = self.network.layers[1].b
        w3 = self.network.layers[2].w
        b3 = self.network.layers[2].b
        self.network.layers[0].update_weights(self.batch_input, 0.1)
        self.network.layers[1].update_weights(self.network.layers[0].output,
                                              0.1)
        self.network.layers[2].update_weights(self.network.layers[1].output,
                                              0.1)
        expected_list = [
            self.network.layers[0].w, self.network.layers[0].b,
            self.network.layers[1].w, self.network.layers[1].b,
            self.network.layers[2].w, self.network.layers[2].b
        ]
        #  Return to old weights
        self.network.layers[0].w = w1
        self.network.layers[0].b = b1
        self.network.layers[1].w = w2
        self.network.layers[1].b = b2
        self.network.layers[2].w = w3
        self.network.layers[2].b = b3
        self.network.update_weight(self.batch_input)
        actual = [
            self.network.layers[0].w, self.network.layers[0].b,
            self.network.layers[1].w, self.network.layers[1].b,
            self.network.layers[2].w, self.network.layers[2].b
        ]
        for index, expected in enumerate(expected_list):
            assert np.equal(actual[index], expected).all()

    def test_exception_train(self):
        test = False
        try:
            self.network.train(self.batch_input, self.output)
        except ValueError as e:
            logging.info(e.__str__())
            test = True
        assert test
        test = False
        try:
            self.network.update_weight(self.batch_input)
        except ValueError as e:
            logging.info(e.__str__())
            test = True
        assert test

    def test_train_repeat(self):
        learning, costs = self.network.train(self.batch_input,
                                             self.batch_output,
                                             epochs=EPOCHS,
                                             repeat=True)
        assert len(learning) == len(costs)
        assert len(learning) == EPOCHS

    def test_train_split(self):
        learning, costs = self.network.train(self.batch_input,
                                             self.batch_output,
                                             epochs=EPOCHS)
        assert len(learning) == len(costs)
        assert len(learning) == EPOCHS
        learning, costs = self.network.train(self.batch_input,
                                             self.batch_output,
                                             epochs=70)
        assert len(learning) == len(costs)
        assert len(learning) == 70
Exemplo n.º 7
0
    err_total = 0.0
    for i in range(len(test_inputs)):
        o = neuralnet.forward_propagate(test_inputs[data_no])
        err = (test_outputs[data_no] - o)**2 / 2
        err_total += err
    return err_total


if __name__ == "__main__":
    learning_time_limit = 10000  # upper bound on training epochs (cut-off condition)
    error_boundary = 0.01  # error threshold used as the convergence criterion
    neuralnet = NeuralNetwork([2, 3, 1], 0.1, 1.0)
    data_indexs = np.arange(len(teach_inputs))
    for n in range(learning_time_limit):
        # Shuffle the presentation order of the samples each epoch.
        np.random.shuffle(data_indexs)
        for data_no in data_indexs:
            # forward pass
            neuralnet.forward_propagate(teach_inputs[data_no])
            # backward pass (learning)
            neuralnet.back_propagation(teach_outputs[data_no])
        # validation
        err = validate(neuralnet, teach_inputs, teach_outputs)
        if err <= error_boundary:
            print("finished in %d times of learning" % (n))
            break
    # Report the per-sample error for every training example.
    for data_no in range(len(teach_inputs)):
        o = neuralnet.forward_propagate(teach_inputs[data_no])
        print(o, (teach_outputs[data_no] - o)**2 / 2)
    # BUG FIX: the total was printed inside the loop above, repeating it
    # once per sample; report it a single time after the loop instead.
    print("total")
    print(validate(neuralnet, teach_inputs, teach_outputs))