Example #1
    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` of layer-by-layer gradients
        of the cost with respect to the biases and weights.

        Assumes numpy is imported as ``np`` and that ``myalgorithm``
        provides ``sigmoid`` and ``sigmoid_prime``.
        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        # Forward pass: store the weighted inputs (z) and activations of
        # every layer, since both are needed for the backward pass.
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = myalgorithm.sigmoid(z)
            activations.append(activation)

        # Backward pass: error term at the output layer.
        delta = self.cost_derivative(activations[-1],
                                     y) * myalgorithm.sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error backwards through the hidden layers.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = myalgorithm.sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)
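
These examples all call into a myalgorithm module that isn't shown on this page. A minimal sketch of the two helpers it would need, assuming the standard logistic sigmoid and its derivative:

    import numpy as np

    def sigmoid(z):
        # Logistic function, applied elementwise to a numpy array.
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_prime(z):
        # Derivative of the sigmoid; backprop uses it for the error terms.
        return sigmoid(z) * (1 - sigmoid(z))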
Example #2
    def feedforward(self, a):
        """Return the network's output when ``a`` is the input activation."""
        for b, w in zip(self.biases, self.weights):
            a = myalgorithm.sigmoid(np.dot(w, a) + b)
        return a
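
Because each layer computes np.dot(w, a) + b, the input a is expected to be a column vector whose length matches the first layer. A usage sketch under that assumption, with a hypothetical net instance built for 784-dimensional inputs:

    import numpy as np

    a = np.random.rand(784, 1)   # one sample as a (784, 1) column vector
    output = net.feedforward(a)  # final-layer activations, also a column vector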
Example #3
    def outcome(self, a):
        """Return the index of the most activated output neuron for input ``a``."""
        for b, w in zip(self.biases, self.weights):
            a = myalgorithm.sigmoid(np.dot(w, a) + b)
        return np.argmax(a)

import numpy
import myalgorithm

def outcome(weights, biases, a):
    """Standalone variant: the weight and bias lists are passed in directly."""
    for b, w in zip(biases, weights):
        a = myalgorithm.sigmoid(numpy.dot(w, a) + b)
    return numpy.argmax(a)
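
The gradients returned by backprop are typically folded into a gradient-descent step. A sketch of one mini-batch update, assuming a hypothetical net object with the methods above, a list mini_batch of (x, y) training pairs, and a learning rate eta:

    eta = 3.0  # hypothetical learning rate

    # Sum the per-example gradients over the mini-batch.
    nabla_b = [np.zeros(b.shape) for b in net.biases]
    nabla_w = [np.zeros(w.shape) for w in net.weights]
    for x, y in mini_batch:
        delta_nabla_b, delta_nabla_w = net.backprop(x, y)
        nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
        nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]

    # Step each parameter against its averaged gradient.
    net.weights = [w - (eta / len(mini_batch)) * nw
                   for w, nw in zip(net.weights, nabla_w)]
    net.biases = [b - (eta / len(mini_batch)) * nb
                  for b, nb in zip(net.biases, nabla_b)]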