def test_nnCostFunction(self):
    """Check nnCostFunction against the reference costs for the fixture net.

    Runs the cost function twice on the same unrolled parameters: once
    unregularized (lamda=0) and once with regularization (lamda=1.0),
    comparing each result to the known expected value to 6 places.
    """
    # (regularization strength, expected cost) pairs.
    cases = ((0, 0.287629), (1.0, 0.383770))
    for lamda, expected in cases:
        cost = nnCostFunction(
            unrolltheta(self.theta1, self.theta2),
            self.s_1,
            self.s_2,
            self.K,
            self.X,
            self.y,
            lamda=lamda,
        )
        self.assertAlmostEqual(cost, expected, places=6)
def computeNumericalGradient(theta, input_layer_size, hidden_layer_size,
                             num_labels, X, y, lamda):
    """Estimate the gradient of nnCostFunction by central differences.

    For each parameter theta[p], perturbs it by +/- e and approximates
    dJ/d(theta[p]) = (J(theta + e*u_p) - J(theta - e*u_p)) / (2 * e),
    where u_p is the p-th unit vector. Used to sanity-check the analytic
    backpropagation gradient.

    Parameters
    ----------
    theta : ndarray
        Unrolled network parameters (flat column of weights).
    input_layer_size, hidden_layer_size, num_labels :
        Network architecture arguments, forwarded to nnCostFunction.
    X, y :
        Training inputs and labels, forwarded to nnCostFunction.
    lamda :
        Regularization strength, forwarded to nnCostFunction.

    Returns
    -------
    ndarray
        Numerical gradient with the same shape as theta.
    """
    numgrad = np.zeros(np.shape(theta))
    perturb = np.zeros(np.shape(theta))
    # Step size: small enough for accuracy, large enough to avoid
    # floating-point cancellation in the difference quotient.
    e = 1e-4
    num_elements = np.shape(theta)[0]
    for p in range(num_elements):
        perturb[p] = e
        loss1 = nnCostFunction(theta - perturb, input_layer_size,
                               hidden_layer_size, num_labels, X, y, lamda)
        loss2 = nnCostFunction(theta + perturb, input_layer_size,
                               hidden_layer_size, num_labels, X, y, lamda)
        # Central difference: O(e^2) accurate.
        numgrad[p] = (loss2 - loss1) / (2 * e)
        # Reset so exactly one component is perturbed per iteration.
        perturb[p] = 0
    return numgrad
# NOTE(review): this is a token-for-token duplicate of the
# computeNumericalGradient defined earlier in this file. Being defined
# later, THIS copy is the one bound at import time and shadows the first;
# one of the two definitions should be deleted.
def computeNumericalGradient(theta, input_layer_size, hidden_layer_size, num_labels, X, y, lamda):
    """Estimate the gradient of nnCostFunction by central differences.

    For each parameter theta[p], perturbs it by +/- e and approximates
    the partial derivative as (J(theta + e) - J(theta - e)) / (2 * e).
    Returns an ndarray of the same shape as theta.
    """
    numgrad = np.zeros(np.shape(theta))
    perturb = np.zeros(np.shape(theta))  # same shape as theta (original noted 38 x 1)
    e = 1e-4  # finite-difference step size
    num_elements = np.shape(theta)[0]
    for p in range(0, num_elements):
        perturb[p] = e  # perturb only component p this iteration
        loss1 = nnCostFunction(theta - perturb, input_layer_size, hidden_layer_size, num_labels, X, y, lamda)
        loss2 = nnCostFunction(theta + perturb, input_layer_size, hidden_layer_size, num_labels, X, y, lamda)
        # Central difference quotient.
        numgrad[p] = (loss2 - loss1) / (2 * e)
        perturb[p] = 0  # reset before moving to the next component
    return numgrad