def test_gradient(self):
        """Check _gradient_loss against an independently computed gradient.

        Expected value: the regularization term ``lam * w`` for each weight
        component (zero for the bias slot), plus the data term
        ``sum(error * x[:, j])`` where ``error = sigmoid(x, b, w) - t``.
        """
        gradient = LogisticRegression._gradient_loss(
            self.x, self.t, self.b, self.w, self.lam
        )

        # Build the expected gradient from the fixture itself rather than
        # hard-coded magic numbers (original used 7 * 3 and 7 * -1, which
        # silently duplicated self.lam and self.w from setUp).
        w = np.asarray(self.w, dtype=float)
        gradient_act = np.concatenate(([0.0], self.lam * w))

        error = LogisticRegression._sigmoid(self.x, self.b, self.w) - self.t
        gradient_act[0] = np.sum(error)
        # error @ x sums error_i * x_ij over rows for every feature column j,
        # generalizing the original per-column lines to any feature count.
        # NOTE(review): assumes error is 1-D of length n_samples — matches the
        # original's use of `error * self.x[:, 0]`.
        gradient_act[1:] += error @ self.x

        self.assertTrue(np.all(np.abs(gradient - gradient_act) < 1.0e-12))