Example 1
    def test_log_factor_gradient(self):
        delta = 0.00001
        y1 = 1.0
        y0 = 2.0
        theta = -2.0
        alpha = 1.0
        beta = 0.0
        # approximate the gradient w.r.t. theta with a forward finite difference
        true_gradient_approx_theta = (
            clib.log_likelihood_2PL(y1, y0, theta + delta, alpha, beta) -
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta)) / delta
        # compute the analytic gradient
        calc_gradient = tools.log_likelihood_factor_gradient(
            y1, y0, theta, alpha, beta)

        self.assertTrue(abs(calc_gradient - true_gradient_approx_theta) < 1e-4)

        # repeat the finite-difference check with the guessing parameter c
        c = 0.25
        true_gradient_approx_theta = (
            clib.log_likelihood_2PL(y1, y0, theta + delta, alpha, beta, c) -
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta, c)) / delta
        # compute the analytic gradient with c
        calc_gradient = tools.log_likelihood_factor_gradient(
            y1, y0, theta, alpha, beta, c)

        self.assertTrue(abs(calc_gradient - true_gradient_approx_theta) < 1e-4)
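The forward difference above should converge to the analytic gradient of the log likelihood with respect to theta. Judging from the expected values in these tests, the model appears to be P = c + (1 - c) * sigmoid(alpha * theta + beta); under that assumption, a minimal standalone sketch of the factor gradient follows (the names p2pl and factor_gradient_ref are illustrative, not part of clib or tools):

import math

def p2pl(theta, alpha, beta, c=0.0):
    # response probability assumed from the test expectations:
    # a guessing floor c plus (1 - c) times a logistic in alpha*theta + beta
    return c + (1.0 - c) / (1.0 + math.exp(-(alpha * theta + beta)))

def factor_gradient_ref(y1, y0, theta, alpha, beta, c=0.0):
    # d/dtheta of y1*log(P) + y0*log(1 - P), by the chain rule
    s = 1.0 / (1.0 + math.exp(-(alpha * theta + beta)))
    p = c + (1.0 - c) * s
    dp_dtheta = (1.0 - c) * alpha * s * (1.0 - s)
    return (y1 / p - y0 / (1.0 - p)) * dp_dtheta

With delta = 1e-5 the forward difference carries an O(delta) truncation error, which is why the test tolerance is the looser 1e-4 rather than machine precision.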
Example 2
    def test_log_likelihood(self):
        # error-handling check (currently disabled): a zero or negative
        # slope/alpha should raise ValueError
        # with self.assertRaisesRegexp(ValueError, 'Slope/Alpha should not be zero or negative.'):
        #     clib.log_likelihood_2PL(0.0, 1.0, 0.0, -1.0, 0.0)

        # with the default parameters (theta=0, alpha=1, beta=0) the
        # response probability is 0.5, so the log likelihood is log(0.5)
        ll = clib.log_likelihood_2PL(1.0, 0.0, 0.0, 1.0, 0.0)
        self.assertEqual(ll, math.log(0.5))
        ll = clib.log_likelihood_2PL(0.0, 1.0, 0.0, 1.0, 0.0)
        self.assertEqual(ll, math.log(0.5))

        # check a non-default parameter value (theta = 1)
        ll = clib.log_likelihood_2PL(1.0, 0.0, 1.0, 1.0, 0.0)
        self.assertEqual(ll, math.log(1.0 / (1.0 + math.exp(-1.0))))

        ll = clib.log_likelihood_2PL(0.0, 1.0, 1.0, 1.0, 0.0)
        self.assertEqual(ll, math.log(1.0 - 1.0 / (1.0 + math.exp(-1.0))))

        # check against a precomputed reference value
        ll = clib.log_likelihood_2PL(0.0, 1.0, -1.1617696779178492, 1.0, 0.0)
        self.assertTrue(abs(ll + 0.27226272946920399) < 1e-10)

        # check that the guessing parameter c is handled correctly
        ll = clib.log_likelihood_2PL(1.0, 0.0, 0.0, 1.0, 0.0, 0.25)
        self.assertEqual(ll, math.log(0.625))
        ll = clib.log_likelihood_2PL(0.0, 1.0, 0.0, 1.0, 0.0, 0.25)
        self.assertEqual(ll, math.log(0.375))
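The expected values in this test pin down the likelihood model: log(0.5) at theta = 0, log(sigmoid(1)) at theta = 1, and log(0.625) / log(0.375) once c = 0.25 enters. A minimal sketch that reproduces these numbers (a hypothetical reference, not the actual clib implementation) is:

import math

def log_likelihood_2PL_ref(y1, y0, theta, alpha, beta, c=0.0):
    # P(correct) = c + (1 - c) * sigmoid(alpha*theta + beta); the log
    # likelihood weights log(P) and log(1 - P) by the response counts
    p = c + (1.0 - c) / (1.0 + math.exp(-(alpha * theta + beta)))
    return y1 * math.log(p) + y0 * math.log(1.0 - p)

For example, log_likelihood_2PL_ref(1.0, 0.0, 0.0, 1.0, 0.0, 0.25) evaluates to math.log(0.25 + 0.75 * 0.5) = math.log(0.625), matching the assertion above.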
Example 3
    def test_log_item_gradient(self):
        delta = 0.00001
        y1 = 1.0
        y0 = 2.0
        theta = -2.0
        alpha = 1.0
        beta = 0.0
        # approximate the beta and alpha gradients with forward finite differences
        true_gradient_approx_beta = (
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta + delta) -
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta)) / delta
        true_gradient_approx_alpha = (
            clib.log_likelihood_2PL(y1, y0, theta, alpha + delta, beta) -
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta)) / delta
        # compute the analytic gradient; index 0 is beta, index 1 is alpha
        calc_gradient = clib.log_likelihood_2PL_gradient(
            y1, y0, theta, alpha, beta)

        self.assertTrue(
            abs(calc_gradient[0] - true_gradient_approx_beta) < 1e-4)
        self.assertTrue(
            abs(calc_gradient[1] - true_gradient_approx_alpha) < 1e-4)

        # repeat the finite-difference check with the guessing parameter c
        c = 0.25
        true_gradient_approx_beta = (
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta + delta, c) -
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta, c)) / delta
        true_gradient_approx_alpha = (
            clib.log_likelihood_2PL(y1, y0, theta, alpha + delta, beta, c) -
            clib.log_likelihood_2PL(y1, y0, theta, alpha, beta, c)) / delta
        # compute the analytic gradient with c
        calc_gradient = clib.log_likelihood_2PL_gradient(
            y1, y0, theta, alpha, beta, c)

        self.assertTrue(
            abs(calc_gradient[0] - true_gradient_approx_beta) < 1e-4)
        self.assertTrue(
            abs(calc_gradient[1] - true_gradient_approx_alpha) < 1e-4)
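Under the same assumed model, the gradient with respect to the item parameters, returned as (d/dbeta, d/dalpha) to match the index order checked above, would look like the sketch below (item_gradient_ref is an illustrative name, not the clib API):

import math

def item_gradient_ref(y1, y0, theta, alpha, beta, c=0.0):
    # gradient of y1*log(P) + y0*log(1 - P) w.r.t. (beta, alpha);
    # dP/dbeta = (1 - c) * s * (1 - s), and dP/dalpha adds a factor of theta
    s = 1.0 / (1.0 + math.exp(-(alpha * theta + beta)))
    p = c + (1.0 - c) * s
    common = (y1 / p - y0 / (1.0 - p)) * (1.0 - c) * s * (1.0 - s)
    return (common, common * theta)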