Example 1
# assumes the surrounding module defines the LAYER_LEN and HIDDEN_LEN constants
# and imports the neural-network helpers as nn (e.g. import neuralnetwork as nn)
def build_network():

    network = nn.neural_network(LAYER_LEN + 2)

    # input layer
    network.create_neuron(0, [1, 0, 0, 0], 1, 'linear')
    network.create_neuron(0, [0, 1, 0, 0], 1, 'linear')
    network.create_neuron(0, [0, 0, 1, 0], 1, 'linear')
    network.create_neuron(0, [0, 0, 0, 1], 1, 'linear')

    # hidden layers: each new neuron is fully connected to every neuron in the
    # previous layer (a factored-out sketch of this wiring step follows the function)
    for layer in range(1, LAYER_LEN + 1):
        lbc = len(network.layers[layer - 1])  # neuron count in the layer before
        for _ in range(HIDDEN_LEN):
            h = network.create_neuron(layer, nn.rand_weights(lbc), 1, 'log')
            for uid in network.layers[layer - 1]:
                network.create_connection(uid, h)

    # output layer: three neurons, each fully connected to the last hidden layer
    lbc = len(network.layers[-2])
    out1 = network.create_neuron(LAYER_LEN + 1, nn.rand_weights(lbc), 1, 'log')
    out2 = network.create_neuron(LAYER_LEN + 1, nn.rand_weights(lbc), 1, 'log')
    out3 = network.create_neuron(LAYER_LEN + 1, nn.rand_weights(lbc), 1, 'log')
    for uid in network.layers[-2]:
        network.create_connection(uid, out1)
        network.create_connection(uid, out2)
        network.create_connection(uid, out3)

    return network
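The hidden-layer and output-layer loops above repeat the same wiring step. Below is a minimal sketch of that step factored into a reusable helper, assuming only the nn API visible in this example (create_neuron returns the new neuron's uid, network.layers[l] lists the uids in layer l, and nn.rand_weights(n) draws n random weights); the name add_dense_layer is hypothetical and not part of the original module.

def add_dense_layer(network, layer, size, activation='log'):
    # hypothetical helper: fully connect `size` new neurons in `layer`
    # to every neuron already present in `layer - 1`
    prev_uids = list(network.layers[layer - 1])
    for _ in range(size):
        uid = network.create_neuron(layer, nn.rand_weights(len(prev_uids)), 1, activation)
        for prev_uid in prev_uids:
            network.create_connection(prev_uid, uid)
    return network.layers[layer]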
Example 2
    def __init__(self, x_offset, y_offset, template=None):

        self.longest_chain = 0
        self.time_alive = 0
        self.finished = False

        self.board = [[' ' for x in range(self.w)] for y in range(self.h)]
        self.prev_board = [[' ' for x in range(self.w)] for y in range(self.h)]

        self.offset = (x_offset, y_offset)
        # create a neural network for the player
        # first layer = size of the input vector (the flattened board plus 6 + 4 extra inputs)
        # several internal layers, each with an experimental size
        # finally an output layer with 4 outputs (one corresponding to each action)

        # TODO: calculate a list of available placements given the board and set these as the
        # 'output' of the NN (a rough sketch follows this constructor); I'm interested to see
        # how this affects how the AI solves the problem
        self.net = neuralnetwork.neural_network([(self.w * self.h) + 6 + 4,
                                                 250, 250, 125, 125, 4])

        # scoring variables
        self.chain = 0  # the current chain length
        self.chain_size = 0  # the number of puyos in the current chain
        self.score = 0  # the players score
        self.diff_colors_in_chain = set()  # the size of this will let us determine a bonus
        self.chain_group_sizes = []  # the size of each group in a chain

        self.puyo_to_remove = set()
        self.falling = None
        self.trigger = None
        self.moves = [
            ((-1, 0), (0, 0)), ((-1, 1), (0, 1)), ((-1, 2), (0, 2)),
            ((-1, 3), (0, 3)), ((-1, 4), (0, 4)), ((-1, 5), (0, 5)),
            ((0, 0), (-1, 0)), ((0, 1), (-1, 1)), ((0, 2), (-1, 2)),
            ((0, 3), (-1, 3)), ((0, 4), (-1, 4)), ((0, 5), (-1, 5)),
            ((-1, 0), (-1, 1)), ((-1, 1), (-1, 2)), ((-1, 2), (-1, 3)),
            ((-1, 3), (-1, 4)), ((-1, 4), (-1, 5)), ((-1, 1), (-1, 0)),
            ((-1, 2), (-1, 1)), ((-1, 3), (-1, 2)), ((-1, 4), (-1, 3)),
            ((-1, 5), (-1, 4))
        ]
        self.buffer = [self.next(), self.next()]
        self.current = self.next()

        # optionally pre-fill the board from a newline-separated template string
        # (template rows map onto self.board[row][col], top row first)
        row = 0
        col = 0
        if template:
            for puyo in template:
                if puyo == "\n":
                    row += 1
                    col = 0
                else:
                    self.board[row][col] = puyo
                    col += 1
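A rough sketch of the TODO above, assuming that ' ' marks an empty board cell, that self.board is indexed [row][col] with row 0 at the top, and that a placement from self.moves is legal only when the top cell of every column it touches is still empty; available_placements is a hypothetical helper, not part of the original class.

    def available_placements(self):
        # hypothetical sketch of the TODO in __init__: keep only the moves whose
        # target columns still have room at the top of the board
        placements = []
        for move in self.moves:
            cols = {pos[1] for pos in move}  # columns the falling pair would occupy
            if all(self.board[0][c] == ' ' for c in cols):
                placements.append(move)
        return placements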
Example 3
def build_network():
    network = nn.neural_network(2)

    # input layer (create_neuron returns each new neuron's uid)
    in_uids = [
        network.create_neuron(0, [1, 0, 0, 0]),
        network.create_neuron(0, [0, 1, 0, 0]),
        network.create_neuron(0, [0, 0, 1, 0]),
        network.create_neuron(0, [0, 0, 0, 1]),
    ]

    # output layer: 15 neurons, each fully connected to the four inputs
    for _ in range(15):
        uid = network.create_neuron(1, nn.rand_weights(4))
        for in_uid in in_uids:
            network.create_connection(in_uid, uid)

    return network
Example 4
def build_network():

    network = nn.neural_network(3)

    # input layer
    in1 = network.create_neuron(0, [1, 0], 0.5, 'threshold')
    in2 = network.create_neuron(0, [0, 1], 0.5, 'threshold')

    # hidden layer
    huid = []
    for _ in range(HIDDEN_LEN):
        h = network.create_neuron(1, nn.rand_weights(2), 1, 'log')
        network.create_connection(in1, h)
        network.create_connection(in2, h)
        huid.append(h)

    # output layer
    out1 = network.create_neuron(2, nn.rand_weights(HIDDEN_LEN), 1, 'log')
    for uid in huid:
        network.create_connection(uid, out1)

    return network
Example 5
import numpy as np
import neuralnetwork as nn
from mnist import MNIST

print("Starting...")

mndata = MNIST('mnist-dataset')
images, labels = mndata.load_training()

print("Training...")

# train
num_classes = 10
targets = np.array([labels]).reshape(-1)  # flat 1-D array of class indices (not used below)
# one-hot encode the labels by fancy-indexing the identity matrix
# (see the short demonstration after this script)
one_hot_targets = np.eye(num_classes)[labels]
net = nn.neural_network(3, [784, 20, 10], [None, "tanh", "softmax"],
                        cost_function="cross_entropy")
net.train(1,
          inputs=images,
          labels=one_hot_targets,
          num_epochs=5,
          learning_rate=0.001,
          filename="savepoint.pkl")

print("Testing...")

# test
images, labels = mndata.load_testing()
targets = np.array([labels]).reshape(-1)
one_hot_targets = np.eye(num_classes)[labels]
net.check_accuracy("savepoint.pkl", images, one_hot_targets)
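The one-hot targets in both the training and test sections are built by fancy-indexing an identity matrix; a small self-contained demonstration of that encoding:

import numpy as np

labels = [3, 0, 9]            # class indices
one_hot = np.eye(10)[labels]  # row i of eye(10) is the one-hot vector for class i
print(one_hot.shape)          # (3, 10)
print(one_hot[0])             # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]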