Example #1
 def backprop(self, a, y):
     # Plain backpropagation gradients for the encoder/decoder weight pair.
     gradients = neur.backprop(a, y, [self.encode_weights, self.decode_weights], 0)
     if self.contractive:
         # Contractive penalty gradient computed from the hidden activations a[1];
         # only columns 1: of each gradient matrix receive the penalty
         # (column 0, presumably the bias column, is skipped).
         reg = 0.01 * self.contractive_reg_gradient(a[1][:,1:])
         gradients[0][:,1:] += reg[:,1:]
         gradients[1][:,1:] += reg[:,1:].transpose()
     return gradients
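The `contractive_reg_gradient` helper used above belongs to this codebase and is not shown. As a rough, self-contained sketch of the underlying idea (the names `contractive_penalty` and `contractive_penalty_grad` are illustrative assumptions, not part of the `neur` module): for a sigmoid encoder, the contractive penalty is the squared Frobenius norm of the Jacobian of the hidden activations with respect to the inputs, and its gradient can be approximated numerically for checking purposes.

 import numpy as np

 def sigmoid(z):
     return 1.0 / (1.0 + np.exp(-z))

 def contractive_penalty(W, X):
     # Squared Frobenius norm of dh/dx for h = sigmoid(X @ W.T).
     # W: (n_hidden, n_visible) encoder weights without a bias column,
     # X: (n_samples, n_visible) inputs.
     H = sigmoid(X @ W.T)                  # hidden activations
     dh = H * (1.0 - H)                    # elementwise sigmoid derivative
     return np.sum(dh ** 2 @ np.sum(W ** 2, axis=1))

 def contractive_penalty_grad(W, X, eps=1e-5):
     # Slow central-difference gradient of the penalty w.r.t. W, for illustration only.
     grad = np.zeros_like(W)
     for idx in np.ndindex(W.shape):
         W_plus, W_minus = W.copy(), W.copy()
         W_plus[idx] += eps
         W_minus[idx] -= eps
         grad[idx] = (contractive_penalty(W_plus, X) -
                      contractive_penalty(W_minus, X)) / (2 * eps)
     return grad

In the `backprop` method above, a gradient of this kind is scaled (0.01 here) and added onto the weight gradients returned by `neur.backprop`.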
Example #2
 def test_backprop(self):
     X = np.array([[1, 2, 3], [2, 3, 4]])
     y = np.array([[0, 0, 1], [0, 0, 1]])
     # Run a forward pass, then compare the analytic backprop gradients
     # against the numerical gradients produced by gradient_check.
     res, a = neur.forward_prop(X, [self.theta, self.theta2])
     grads = neur.backprop(a, y, [self.theta, self.theta2], 0)
     grads_check = neur.gradient_check(X, y, [self.theta, self.theta2], neur.logistic_squared_cost_function)
     self.equalish(grads[0], grads_check[0])
     self.equalish(grads[1], grads_check[1])
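`neur.gradient_check` is specific to this project; the standard technique behind such a check is a central-difference approximation of the cost gradient, which the analytic backprop gradients are compared against. A minimal, generic sketch of that technique (the name `numerical_gradient` and its signature are illustrative assumptions, not the `neur` API):

 import numpy as np

 def numerical_gradient(cost_fn, thetas, eps=1e-4):
     # Central-difference approximation of d cost / d theta for each weight
     # matrix. cost_fn takes a list of weight matrices and returns a scalar.
     grads = []
     for k in range(len(thetas)):
         grad = np.zeros_like(thetas[k])
         for idx in np.ndindex(thetas[k].shape):
             plus = [t.copy() for t in thetas]
             minus = [t.copy() for t in thetas]
             plus[k][idx] += eps
             minus[k][idx] -= eps
             grad[idx] = (cost_fn(plus) - cost_fn(minus)) / (2 * eps)
         grads.append(grad)
     return grads

If the analytic and numerical gradients agree to within a small tolerance (as `equalish` asserts), the backprop implementation is very likely correct.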
Example #3
 def backprop(self, a, y):
     # Delegate to the shared backprop routine over the autoencoder's two weight matrices.
     return neur.backprop(a, y, [self.encode_weights, self.decode_weights], 0)