import numpy as np

def backprop(self, x, y):
    # Gradients of the cost with respect to every bias and weight,
    # initialized to zero, one array per layer.
    nabla_b = [np.zeros(b.shape) for b in self.net.biases]
    nabla_w = [np.zeros(w.shape) for w in self.net.weights]
    # feedforward
    activation = x
    activations = [x]  # list to store all the activations, layer by layer
    zs = []  # list to store all the z vectors, layer by layer
    for b, w in zip(self.net.biases, self.net.weights):
        z = np.dot(w, activation) + b
        zs.append(z)
        activation = sigmoid(z)
        activations.append(activation)
    # backward pass: error of the output layer for a quadratic cost
    delta = (activations[-1] - y) * sigmoid_prime(zs[-1])
    nabla_b[-1] = delta
    nabla_w[-1] = np.dot(delta, activations[-2].transpose())
    # propagate the error backwards through the hidden layers
    for l in range(2, len(self.net.n_neurons)):
        z = zs[-l]
        sp = sigmoid_prime(z)
        delta = np.dot(self.net.weights[-l + 1].transpose(), delta) * sp
        nabla_b[-l] = delta
        nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
    return nabla_b, nabla_w
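# backprop relies on sigmoid and sigmoid_prime helpers that are not shown
# in this excerpt. Below is a minimal sketch of the usual definitions,
# assuming the standard logistic activation (an assumption, not
# necessarily the original helpers):

def sigmoid(z):
    # Logistic function, applied elementwise: 1 / (1 + e^(-z)).
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    # Derivative of the sigmoid: sigmoid(z) * (1 - sigmoid(z)).
    return sigmoid(z) * (1.0 - sigmoid(z))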
def feed_forward_fast(self, a):
    # Propagate the input a through the network one layer at a time,
    # treating each layer as a single matrix-vector product.
    for b, w in zip(self.biases, self.weights):
        a = sigmoid(np.dot(w, a) + b)
    return a
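# A usage sketch for feed_forward_fast, assuming the shape conventions
# implied by np.dot(w, a) + b: activations are (n, 1) column vectors and
# each weight matrix has shape (neurons_out, neurons_in). The namespace
# below is hypothetical scaffolding standing in for the network object,
# and the layer sizes are chosen purely for illustration.
import types

sizes = [784, 30, 10]
net = types.SimpleNamespace(
    biases=[np.random.randn(n, 1) for n in sizes[1:]],
    weights=[np.random.randn(n, m) for m, n in zip(sizes[:-1], sizes[1:])],
)
output = feed_forward_fast(net, np.random.randn(sizes[0], 1))  # shape (10, 1)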
def activate(self):
    # Weighted sum of the incoming activities plus this neuron's bias,
    # then squashed through the sigmoid.
    self.z = sum(synapse.neuron_from.activity * synapse.weight
                 for synapse in self.synapses_in) + self.bias
    self.activity = sigmoid(self.z)
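# activate assumes an object graph in which each neuron keeps a list of
# incoming synapses and each synapse knows its source neuron and weight.
# The classes below are a hypothetical minimal sketch of that structure:
# only synapses_in, neuron_from, weight, bias, activity, and z come from
# the code above; everything else is an assumption.

class Synapse:
    def __init__(self, neuron_from, neuron_to, weight):
        self.neuron_from = neuron_from
        self.weight = weight
        neuron_to.synapses_in.append(self)  # register on the receiving end

class Neuron:
    def __init__(self, bias=0.0):
        self.bias = bias
        self.synapses_in = []
        self.activity = 0.0
        self.z = 0.0

Neuron.activate = activate  # attach the per-neuron update defined above

# Wiring example: two input neurons feeding one output neuron.
a, b = Neuron(), Neuron()
a.activity, b.activity = 1.0, 0.5
out = Neuron(bias=-0.5)
Synapse(a, out, weight=0.3)
Synapse(b, out, weight=-0.8)
out.activate()  # out.z == -0.6, out.activity == sigmoid(-0.6)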