Code example #1
def main(train=False):
    if train:
        # Load HOW_MANY sample images for each of the DIGITS digit labels.
        digit_arrays = {
            str(digit): [
                load_image(f"{BASE_PATH}{digit}_{index}.png")
                for index in range(HOW_MANY)
            ]
            for digit in range(DIGITS)
        }

        # Flatten every image into a bipolar input array for the network.
        flat_arrays = {
            digit: [image_to_bipolar_array(image) for image in images]
            for digit, images in digit_arrays.items()
        }

        # One output neuron per digit label.
        network = Perceptron(labels=list(digit_arrays.keys()),
                             learning_rate=LEARNING_RATE,
                             activation_function=activation_function)

        # Register every flattened image as a training source for its digit.
        for label, sources in flat_arrays.items():
            for source in sources:
                network.add_sources(label, source)

        # Train, then persist the trained neurons for later runs.
        network.train(random_starting_weights=True)
        network.save_neurons(NEURONS_PICKLE_PATH)

        print(f"Cycles: {network.cycles}")
        print(f"Adjustments: {network.adjustments}\n")

    else:
        # Skip training and reuse previously saved neurons.
        network = Perceptron()
        network.load_neurons(NEURONS_PICKLE_PATH)

    draw = Drawing()

    # Classify drawings until the drawing window returns None.
    while True:
        test_image = draw.get_character()
        # test_image = load_image(f"{BASE_PATH}0_0.png")

        if test_image is None:
            break

        flat = image_to_bipolar_array(test_image)
        out = network.output(flat)

        # Every neuron whose output is ACTIVATED claims the drawing as its digit.
        predictions = [
            digit for digit, output in out.items() if output == ACTIVATED
        ]

        if len(predictions) == 0:
            out_string = "The network doesn't know what digit that is."
        elif len(predictions) == 1:
            out_string = f"The network thinks that's a {predictions[0]}."
        else:
            out_string = f"The network thinks that might be one of these digits: {predictions}."
        print(out_string)
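
Code example #1 depends on project-specific pieces defined elsewhere: load_image, image_to_bipolar_array, activation_function, the Drawing class, and constants such as BASE_PATH, HOW_MANY, DIGITS, LEARNING_RATE, NEURONS_PICKLE_PATH and ACTIVATED. The sketch below only illustrates what those helpers and constants might look like, assuming greyscale PNG samples, Pillow/NumPy for image handling, and bipolar values of +1/-1; every name, value and threshold in it is an assumption, not the original code.

import numpy as np
from PIL import Image

BASE_PATH = "images/"               # assumed folder holding the digit samples
DIGITS = 10                         # digit labels 0-9
HOW_MANY = 10                       # assumed number of samples per digit
LEARNING_RATE = 0.1                 # assumed learning rate
NEURONS_PICKLE_PATH = "neurons.pickle"
ACTIVATED, NOT_ACTIVATED = 1, -1    # bipolar output values


def load_image(path):
    # Load one sample as a greyscale NumPy array.
    return np.asarray(Image.open(path).convert("L"))


def image_to_bipolar_array(image):
    # Dark pixels become ACTIVATED (+1), light pixels NOT_ACTIVATED (-1).
    return np.where(np.asarray(image) < 128, ACTIVATED, NOT_ACTIVATED).flatten()


def activation_function(value):
    # Bipolar step function applied to each neuron's weighted sum.
    return ACTIVATED if value >= 0 else NOT_ACTIVATED
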
Code example #2
import numpy as np


def main():
    # Letter X: ACTIVATED cells on both diagonals of an otherwise empty grid.
    x_letter = np.full(GRID_SHAPE, NOT_ACTIVATED, dtype=np.int8)
    np.fill_diagonal(x_letter, ACTIVATED)             # main diagonal
    np.fill_diagonal(np.fliplr(x_letter), ACTIVATED)  # anti-diagonal (fliplr returns a view)
    x_flat = x_letter.flatten()

    # Letter T: ACTIVATED across the top row and down the middle column.
    t_letter = np.full(GRID_SHAPE, NOT_ACTIVATED, dtype=np.int8)
    t_letter[0, :] = ACTIVATED
    mid_col = GRID_SHAPE[1] // 2
    t_letter[:, mid_col] = ACTIVATED
    t_flat = t_letter.flatten()

    # One output neuron: X should activate it, T should not.
    targets = {"x": ACTIVATED, "t": NOT_ACTIVATED}

    network = Perceptron(LEARNING_RATE, activation_function)

    # Register each flattened letter together with its target output.
    network.add_sources(x_flat, targets["x"])
    network.add_sources(t_flat, targets["t"])

    network.train(random_starting_weights=True)

    print(
        f'\n>>Perceptron results:\n\n'
        f'Starting weights:'
        f'\n{np.array2string(np.array(network.starting_weights), precision=5, max_line_width=10*GRID_WIDTH)}\n'
        f'Starting bias: {network.starting_bias:.5f}\n\n'
        f'Final weights:'
        f'\n{np.array2string(np.array(network.weights), precision=5, max_line_width=10*GRID_WIDTH)}\n'
        f'Final bias: {network.bias:.5f}\n\n'
        f'Total cycles: {network.cycles}\n')

    # Feed each training letter back through the trained neuron.
    x_output = network.output(x_flat)
    t_output = network.output(t_flat)

    x_activation = "Activated" if x_output == ACTIVATED else "Not activated"
    t_activation = "Activated" if t_output == ACTIVATED else "Not activated"

    print(f'Output for letter X: {x_activation}\n'
          f'Output for letter T: {t_activation}\n')
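
Neither example shows the Perceptron class itself. Purely to make the interface used in code example #2 concrete (the constructor, add_sources, train, output, and the weights/bias/cycles attributes), here is a minimal single-neuron sketch based on the classic perceptron learning rule; the update rule, the random weight range and the stopping condition are assumptions, not necessarily the original implementation.

import random

import numpy as np


class Perceptron:
    # Minimal single-output perceptron sketch matching the calls above (assumed, not the original).

    def __init__(self, learning_rate, activation_function):
        self.learning_rate = learning_rate
        self.activation_function = activation_function
        self.sources = []            # list of (input_array, target) pairs
        self.weights = None
        self.bias = 0.0
        self.starting_weights = None
        self.starting_bias = 0.0
        self.cycles = 0

    def add_sources(self, source, target):
        self.sources.append((np.asarray(source, dtype=float), target))

    def output(self, source):
        # Weighted sum plus bias, passed through the activation function.
        return self.activation_function(float(np.dot(self.weights, source) + self.bias))

    def train(self, random_starting_weights=False):
        n_inputs = len(self.sources[0][0])
        if random_starting_weights:
            self.weights = np.array([random.uniform(-0.5, 0.5) for _ in range(n_inputs)])
            self.bias = random.uniform(-0.5, 0.5)
        else:
            self.weights = np.zeros(n_inputs)
            self.bias = 0.0
        self.starting_weights = self.weights.copy()
        self.starting_bias = self.bias

        # Classic perceptron rule: repeat full passes until no source is misclassified.
        while True:
            self.cycles += 1
            misclassified = 0
            for source, target in self.sources:
                error = target - self.output(source)
                if error != 0:
                    misclassified += 1
                    self.weights += self.learning_rate * error * source
                    self.bias += self.learning_rate * error
            if misclassified == 0:
                break

With a bipolar step activation like the one sketched after code example #1, this rule converges for linearly separable patterns such as the X and T grids above.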