# Assumed imports for this fragment: the module names are taken from the
# calls below; GraphWin comes from John Zelle's graphics.py.
import numpy as np
import mnist_loader_with_unknowns
import network
import graphical_output
from graphics import GraphWin
from network import sigmoid


def main():
    training_data, validation_data, test_data = \
        mnist_loader_with_unknowns.load_data_wrapper()
    size = 10  # display scale: each MNIST pixel becomes a size x size square
    print(len(training_data))
    hidden_neurons = 30  #input("Enter the number of hidden neurons:")
    net = network.Network([784, hidden_neurons, 11])
    num_of_epochs = 20  #input("Enter the number of epochs:")
    mini_batch_size = 50  #input("Enter the mini-batch size:")
    net.SGD(training_data, num_of_epochs, mini_batch_size,
            3.0)  #, test_data=test_data)
    win = GraphWin('Number', size * 28, size * 28)  # give title and dimensions
    entry = int(input("Which number to display?"))
    while entry != 0:
        graphical_output.update_number(training_data[entry][0], size, win)
        # The label is a one-hot column vector; its argmax is the digit,
        # with index 10 meaning "unknown".
        answer = int(np.argmax(training_data[entry][1]))
        if answer == 10:
            answer = "unknown"
        print("This is supposed to be a", answer)
        # Manual forward pass, neuron by neuron.  Note that this recomputes
        # what the network itself does, except that the bias terms are skipped.
        activations = []
        for layer1 in range(hidden_neurons):
            activation = sigmoid(
                np.dot(net.weights[0][layer1], training_data[entry][0]))
            activations.append(activation)
        outputs = []
        for output_neuron in range(11):  # digits 0-9 plus "unknown"
            output = sigmoid(np.dot(net.weights[1][output_neuron],
                                    activations))
            outputs.append(output)
        # Crude bar chart of the 11 output activations.
        for counter, digit in enumerate(outputs):
            print(counter, "|" * int(digit * 100))
        entry = int(input("Which number to display?"))
        # Clear the window before drawing the next digit.
        for item in win.items[:]:
            item.undraw()
Example #2
def update_line(win, cur_layer, prev_layer, i, j):
    """Draw the connection from neuron j of prev_layer to neuron i of
    cur_layer: red for a negative weight, blue otherwise, with a width
    that grows with the weight's magnitude."""
    line = Line(cur_layer.neurons[i].position, prev_layer.neurons[j].position)
    if cur_layer.weights[i][j] < 0:
        line.setOutline("red")
    else:
        line.setOutline("blue")
    # sigmoid(|w|) - 0.5 lies in [0, 0.5), so the width lies in [0, 4).
    line.setWidth(8 * (sigmoid(abs(cur_layer.weights[i][j])) - 0.5))
    line.draw(win)
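
# A quick check of the width scaling above, assuming the same module-level
# sigmoid used in the other examples: widths grow from ~0 toward an
# asymptote of 4 as |w| increases.
def demo_width_scale():
    for w in (0.1, 1.0, 3.0, 10.0):
        print(w, round(8 * (sigmoid(abs(w)) - 0.5), 2))
    # prints roughly: 0.1 0.2, 1.0 1.85, 3.0 3.62, 10.0 4.0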
Example #3
    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        # x is a matrix of inputs, one test case per column
        # y is a matrix of target outputs, one answer per column

        # nabla_b and nabla_w hold the derivatives of the cost with respect
        # to the biases and the weights, layer by layer.
        nabla_b = []
        nabla_w = []

        test_data_len = x.shape[1]  # number of examples in the batch

        for b, w in zip(self.biases, self.weights):
            # one bias-gradient column per example; weight gradients are
            # summed over the batch, so they keep w's shape
            nabla_b.append(np.zeros((b.shape[0], test_data_len)))
            nabla_w.append(np.zeros(w.shape))

        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer

        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)

        # backward pass
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(
            zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())

        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
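
        # For reference, the loop below implements the standard backprop
        # recurrence (elementwise product written as *):
        #     delta^l = ((w^{l+1})^T delta^{l+1}) * sigmoid'(z^l)
        #     dC/db^l = delta^l
        #     dC/dw^l = delta^l (a^{l-1})^T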

        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())

        return (nabla_b, nabla_w)
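
    # A finite-difference check is a quick way to validate a backprop
    # implementation like the one above.  A minimal sketch, assuming a
    # quadratic cost C = 0.5*||a - y||^2 (so cost_derivative returns a - y);
    # it perturbs one weight and compares against the nabla_w entry:
    def grad_check(self, x, y, i=0, j=0, eps=1e-5):
        nabla_b, nabla_w = self.backprop(x, y)

        def cost():
            a = x
            for b, w in zip(self.biases, self.weights):
                a = sigmoid(np.dot(w, a) + b)
            return 0.5 * np.sum((a - y) ** 2)

        old = self.weights[0][i, j]
        self.weights[0][i, j] = old + eps
        c_plus = cost()
        self.weights[0][i, j] = old - eps
        c_minus = cost()
        self.weights[0][i, j] = old  # restore the original weight
        numeric = (c_plus - c_minus) / (2 * eps)
        print(numeric, nabla_w[0][i, j])  # the two should agree closely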
Example #4
# Assumed imports for this fragment: the module names are taken from the
# calls below.  prime and vprime are derivative helpers defined elsewhere in
# the original script (apparently the sigmoid derivative and a vectorized
# version of it).
import gzip
import pickle
import numpy as np
import mnist_loader_with_unknowns
import network
import graphical_output
from graphics import GraphWin
from network import sigmoid


def main():
    training_data, validation_data, test_data = \
        mnist_loader_with_unknowns.load_data_wrapper()
    print(len(training_data))
    #print len(validation_data)
    #print len(test_data)
    hidden_neurons = 100  #input("Enter the number of hidden neurons:")
    net = network.Network([784, hidden_neurons, 11])
    num_of_epochs = 20  #input("Enter the number of epochs:")
    mini_batch_size = 50  #input("Enter the mini-batch size:")
    net.SGD(training_data, num_of_epochs, mini_batch_size,
            3.0)  #, test_data=test_data)
    #graphical_output.draw_new_number(training_data[0][0], 10)
    #print training_data[0][1]

    size = 20
    win = GraphWin('Number', size * 28, size * 28)  # give title and dimensions
    #print type(new_number)
    new_number = training_data[0][0]
    graphical_output.update_number(new_number, size, win)
    revised_training_pairs = []
    for new in range(10):
        new_number = training_data[new][0]
        target = new  #random.randrange(0,10) #int(input("What number do you want to draw?:"))
        print("Target =", target)
        #nu2 = 20 #input("Input the learning rate:")
        #new_number = np.random.rand(784)
        for step in range(20):  # 20 gradient-descent steps on the input image
            # Manual forward pass (bias terms are skipped here as well).
            activations = []
            for layer1 in range(hidden_neurons):
                activation = sigmoid(np.dot(net.weights[0][layer1],
                                            new_number))
                activations.append(activation)
            outputs = []
            for output_neuron in range(11):  # digits 0-9 plus "unknown"
                output = sigmoid(
                    np.dot(net.weights[1][output_neuron], activations))
                outputs.append(output)
            # Gradient of the squared-error cost with respect to each input
            # pixel, summed over all 11 output neurons.
            cost = np.zeros(784)
            for edit_pixel in range(784):
                for output_neuron in range(11):
                    level0weights = []
                    for j in range(hidden_neurons):
                        level0weights.append(net.weights[0][j][edit_pixel])
                    partial_derivative = np.multiply(
                        level0weights, np.transpose(vprime(activations)))
                    partial_output = prime(outputs[output_neuron]) * np.dot(
                        net.weights[1][output_neuron],
                        np.transpose(partial_derivative))
                    if output_neuron == target:
                        # target term: d((1 - o_t)^2)/dx
                        cost[edit_pixel] += -2 * (
                            1 - outputs[output_neuron]) * partial_output
                    else:
                        # off-target term: d(o_k^2)/dx
                        cost[edit_pixel] += 2 * outputs[
                            output_neuron] * partial_output
            # Step size chosen from the spread of the gradient so the
            # per-pixel updates stay small.
            nu2 = 0.5 / (np.amax(cost) - np.amin(cost))
            # Take the gradient step, clamping pixels to the valid range [0, 1].
            for edit_pixel in range(784):
                new_number[edit_pixel] = min(
                    1, max(0, new_number[edit_pixel] - nu2 * cost[edit_pixel]))
            for item in win.items[:]:
                item.undraw()
            graphical_output.update_number(new_number, size, win)
        # new_number aliases training_data[new][0], so the original training
        # image has itself been modified in place.  Store it with label 10
        # ("unknown").
        revised_training_pairs.append((new_number, 10))
        for item in win.items[:]:
            item.undraw()
    #win = GraphWin('Number', size*28, size*28) # give title and dimensions
    #print len(revised_training_pairs[0][0])
    #print type(revised_training_pairs[0][0])
    #graphical_output.update_number(revised_training_pairs[0][0],size,win)
    #win.getMouse()
    #for x in range(10):
    #graphical_output.update_number(revised_training_pairs[x][0],size,win)
    #win.getMouse()
    #print outputs
    #e = np.zeros((11, 1))
    #e[10] = 1.0

    enter = 1  #input("Do you want to save this back to the MNIST data? (1=y/0=n):")
    if enter == 1:
        f = gzip.open('../data/revised-mnist.pkl.gz', 'rb')
        training_data, validation_data, test_data = pickle.load(f)
        f.close()
        #print type(training_data[1])
        #print type(change_to_number(training_data[1]))
        #print len(change_to_number(training_data[1]))
        for x, y in zip(training_data[0], training_data[1]):
            revised_training_pairs.append((x, y))
        #revised_training_pairs.append((new_number, 10))
        #revised_validation_pairs = []
        #for x, y in zip(validation_data[0], validation_data[1]):
        #	revised_validation_pairs.append((x, y))
        #revised_test_pairs = []
        #for x, y in zip(test_data[0], test_data[1]):
        #	revised_test_pairs.append((x, y))
        revised_training_data = [list(d) for d in zip(*revised_training_pairs)]
        #revised_validation_data = [list(d) for d in zip(*revised_validation_pairs)]
        #revised_test_data = [list(d) for d in zip(*revised_test_pairs)]
        #print len(revised_training_data)
        #print len(revised_validation_data)
        #print len(revised_test_data)
        print("Saving expanded data. This may take a few minutes.")
        f = gzip.open("../data/revised-mnist.pkl.gz", "wb")
        pickle.dump((revised_training_data, validation_data, test_data), f)
        f.close()
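
# The triple loop above costs O(784 * 11 * hidden_neurons) per gradient step.
# A minimal vectorized sketch of the same input gradient, assuming the same
# bias-free two-layer pass (h = sigmoid(W0 x), o = sigmoid(W1 h)) and writing
# the sigmoid derivative as s * (1 - s):
def input_gradient(W0, W1, x, target):
    h = 1.0 / (1.0 + np.exp(-np.dot(W0, x)))    # hidden activations
    o = 1.0 / (1.0 + np.exp(-np.dot(W1, h)))    # output activations
    dC_do = 2.0 * o                             # d(o_k^2)/d(o_k) for off-target k
    dC_do[target] = -2.0 * (1.0 - o[target])    # d((1 - o_t)^2)/d(o_t)
    delta_out = dC_do * o * (1.0 - o)           # back through the output sigmoid
    delta_hidden = np.dot(W1.T, delta_out) * h * (1.0 - h)
    return np.dot(W0.T, delta_hidden)           # one entry per input pixel
# Each descent step then becomes
#     new_number = np.clip(new_number - nu2 * input_gradient(W0, W1, new_number, target), 0, 1)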
Example #5
import network as n

p = n.sigmoid(2)
print(p)  # ~0.8808, i.e. 1 / (1 + exp(-2))
Example #6
# Assumed imports: numpy and the network module, referenced as ``net`` below.
import numpy as np
import network as net


def feedforward(a, weight0, weight1):
    """Two-layer forward pass in row-vector convention (biases omitted):
    the input is transposed to a row and left-multiplied by each matrix."""
    row = a.T
    hidden = net.sigmoid(np.dot(row, weight0))
    return net.sigmoid(np.dot(hidden, weight1))
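
# A usage sketch under the same assumptions: the weight matrices are stored
# transposed relative to network.Network, e.g. (784, hidden) and (hidden, 11),
# and the shapes below are hypothetical.
a = np.random.rand(784, 1)            # column-vector input, as in the MNIST loader
weight0 = np.random.randn(784, 30)    # 30 hidden neurons
weight1 = np.random.randn(30, 11)
print(feedforward(a, weight0, weight1).shape)  # (1, 11): one row of 11 outputs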