Example #1
    def feed_forward(self, input, layer=0):
        # If layer is 0, we're starting a new epoch, wipe the activation table
        if (layer == 0): self.activations = []

        if (DEBUG):
            log(
                Mode.DEBUG, "L" + str(layer) + " Weight Table: " +
                str(self.weights[layer]))

        # If we've recursed down all the layers, process the output layer and return
        if (layer == self.layers):
            output_layer = []
            for _ in range(0, self.output_size):
                # Start with the bias contribution to the node
                h = self.weights[layer][0] * self.bias
                # Sum the product of weights and inputs
                h += numpy.matmul(self.weights[layer][1:], input)
                output_layer.append(self.activate(h))
            self.activations.append(output_layer)
            if (self.rsoftmax): return self.softmax(output_layer)
            else: return numpy.around(output_layer, self.accuracy)

        # Recurse down the hidden layers normally
        activations = []
        for _ in range(0, self.width):
            # Start with the bias contribution to the node
            h = self.weights[layer][0] * self.bias
            # Sum the product of weights and inputs (weight 0 is the bias
            # weight, so the input weights start at index 1)
            h += numpy.matmul(self.weights[layer][1:], input)
            activations.append(self.activate(h))
        self.activations.append(activations)

        return self.feed_forward(activations, layer + 1)
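Both branches depend on `activate` and `softmax` helpers that the excerpt does not include. A minimal sketch of what they plausibly look like, assuming a sigmoid activation (the `h * (1 - h)` factor in Example #4's back propagation is the sigmoid derivative) and a numerically stabilized softmax:

import numpy

def activate(h):
    # Sigmoid activation; its derivative h * (1 - h) is the factor
    # used when computing the taus during back propagation
    return 1.0 / (1.0 + numpy.exp(-h))

def softmax(layer):
    # Subtract the max before exponentiating for numerical stability
    e = numpy.exp(numpy.array(layer) - numpy.max(layer))
    return e / e.sum()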
Example #2
    def train(self):
        MST = self.tolerance * self.tolerance # Mean square tolerance
        self.activations = []
        for i in range(0, len(self.data)):
            output = self.feed_forward(self.data[i])[0]
            error = self.targets[i] - output
            error *= error

            log(
                Mode.INFO, "Target: " + str(self.targets[i]) + ",\n\tOutput: " +
                str(output) + ",\n\tMSE: " + str(error) + "\n")

            # back_propogate() expects the layer to start from (and a target
            # for the output pass); starting at the last layer is assumed here
            if (error > MST): self.back_propogate(self.layers, target=self.targets[i])
            else: break
Example #3
def main():
    try:
        targets, training_data = retrieve_from_csv(
            "./mnist_data/mnist_demo.csv")
    except FileNotFoundError as e:
        log(Mode.ERROR, str(e))
        sys.exit(-1)

    # Preprocess the data
    targets = preprocess(targets)
    training_data = [preprocess(row) for row in training_data]

    # Create the neural network
    training_model = NeuralNetwork(targets,
                                   training_data,
                                   outputs=1,
                                   layers=100,
                                   learning_rate=0.1,
                                   momentum=0.9,
                                   accuracy=20,
                                   ret_type=Type.softmax)
    log(Mode.INFO, "Created neural network: " + str(training_model))

    # Run the training model
    log(Mode.WARN, "Training started!")
    training_model.train()
    log(Mode.WARN, "Training ended!")
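`retrieve_from_csv` and `preprocess` are not shown in the excerpt. One plausible shape for them, assuming the usual MNIST CSV layout (the label in the first column, pixel values after it) and scaling into [0, 1]:

import csv

def retrieve_from_csv(path):
    # Assumes MNIST CSV rows: label in column 0, pixel values after
    targets, rows = [], []
    with open(path, newline="") as f:
        for record in csv.reader(f):
            targets.append(int(record[0]))
            rows.append([int(px) for px in record[1:]])
    return targets, rows

def preprocess(values):
    # Scale raw byte values (0-255) into [0, 1]
    return [v / 255.0 for v in values]

Note that main() applies preprocess to the targets as well, so the helper has to accept any flat list of numbers.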
Example #4
    def back_propogate(self, layer, target=None, ptaus=None):
        if layer < 0: return

        if (DEBUG):
            log(Mode.DEBUG, "Driving back propagation for L" + str(layer))

        # Calculate the taus, dependent on hidden or output layer
        taus = []
        deltas = []
        if (target is not None and ptaus is None):  # Output layer
            # One tau per output node: sigmoid derivative times the error
            for h in self.activations[layer]:
                taus.append(h * (1 - h) * (target - h))

            # Update the bias weight first, then one weight per hidden node
            # (a single output node is assumed, so taus[0] is its tau)
            deltas.append(self.learning_rate * taus[0] * self.bias)
            for i in range(0, self.width):
                deltas.append(self.learning_rate * taus[0] *
                              self.activations[layer - 1][i])
            self.weights[layer] += deltas
        elif (target is None and ptaus is not None):  # Hidden layer
            # Push the previous layer's taus back through this layer's weights
            weighted = self.weights[layer] * ptaus
            for i in range(0, self.width):
                h = self.activations[layer][i]
                taus.append(h * (1 - h) * weighted[0])

            # Scale each weight delta by the input that fed the weight:
            # the bias first, then this layer's raw data
            inputs = numpy.append([self.bias], self.data[layer])
            deltas.append(self.learning_rate * taus[0] * self.bias)
            for i in range(0, self.width):
                deltas.append(self.learning_rate * taus[i] * inputs[i])
            self.weights[layer] += deltas

        else:
            log(Mode.ERROR,
                "Invalid set of arguments for backprop at L" + str(layer))
            return

        return self.back_propogate(layer - 1, ptaus=taus)
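The `h * (1 - h)` factor in both branches is the sigmoid derivative written in terms of the activation itself: if h = sigmoid(x), then the derivative at x equals h * (1 - h). A quick numerical check of that identity:

import numpy

def sigmoid(x):
    return 1.0 / (1.0 + numpy.exp(-x))

x = 0.5
h = sigmoid(x)
eps = 1e-6
# Central-difference estimate of the derivative at x
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
print(h * (1 - h), numeric)  # both print ~0.235004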
Example #5
    def handle(self):
        self.data = self.rfile.readline().strip().decode("UTF-8")
        log(Mode.INFO, "[" + self.client_address[0] + "]: " + self.data)
        self.wfile.write(bytes(self.data.upper(), "UTF-8"))
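The `rfile`/`wfile` pair and `client_address` come from the standard library's `socketserver.StreamRequestHandler`. A minimal server that wires a handler like this one up on the port Example #6 connects to (the `UpperCaseHandler` name is an assumption):

import socketserver

class UpperCaseHandler(socketserver.StreamRequestHandler):
    def handle(self):
        # Echo each received line back in upper case
        data = self.rfile.readline().strip().decode("UTF-8")
        self.wfile.write(bytes(data.upper() + "\n", "UTF-8"))

if __name__ == "__main__":
    with socketserver.TCPServer(("localhost", 8081), UpperCaseHandler) as server:
        server.serve_forever()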
Example #6
#!/usr/bin/env python3

import socket, sys, time
from ftfutils import Mode, log

host = 'localhost'
port = 8081

log(Mode.INFO, "Creating the socket: " + str(host) + ":" + str(port))

try:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
    log(Mode.ERROR, "Failed to create socket!")
    sys.exit(-1)

log(Mode.INFO, "Getting remote IP...")
try:
    remote_ip = socket.gethostbyname(host)
except socket.gaierror:
    log(Mode.ERROR, "Failed to find a server!")
    sys.exit(-1)

log(Mode.INFO, "Connecting to server: " + str(remote_ip) + ":" + str(port))
sock.connect((remote_ip, port))

while True:
    request = "GET / HTTP/1.0\r\n\r\n"
    log(Mode.INFO, "Sending request: " + request)

    try:
        sock.sendall(request.encode())
    except socket.error:
        log(Mode.ERROR, "Failed to send request!")
        sys.exit(-1)