コード例 #1
0
ファイル: mml_full_methods.py プロジェクト: xuek622/girth
    def alpha_min_func(alpha_estimate):
        """Return the negative marginal log-likelihood for a trial
        shared discrimination value.

        Sets every item's discrimination to ``alpha_estimate`` (in place,
        via the closed-over array), then alternates bounded 1-D searches
        over each item's difficulty until the difficulties converge.

        NOTE(review): relies on names from the enclosing scope
        (difficulty, discrimination, the_sign, theta, distribution,
        n_items, counts, options, quad_*) and mutates several of them.
        """
        # Broadcast the scalar trial value to all items' discriminations.
        discrimination[:] = alpha_estimate

        for iteration in range(options['max_iteration']):
            previous_difficulty = difficulty.copy()

            # Quadrature evaluation for values that do not change
            # within this sweep: full likelihood kernel times the prior.
            partial_int = _compute_partial_integral(theta, difficulty,
                                                    discrimination, the_sign)
            partial_int *= distribution

            for item_ndx in range(n_items):
                # pylint: disable=cell-var-from-loop

                # remove contribution from current item
                local_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                      discrimination[item_ndx, None],
                                                      the_sign[item_ndx, None])

                partial_int /= local_int

                def min_local_func(beta_estimate):
                    # 1-D cost over this item's difficulty: write the trial
                    # beta in place, re-insert the item's own likelihood
                    # term, and integrate over ability.
                    difficulty[item_ndx] = beta_estimate

                    estimate_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                             discrimination[item_ndx, None],
                                                             the_sign[item_ndx, None])

                    estimate_int *= partial_int

                    # `x` is unused: the quadrature grid is already baked
                    # into estimate_int; fixed_quad supplies only weights.
                    otpt = integrate.fixed_quad(
                        lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]

                    return -np.log(otpt).dot(counts)

                # Bounded scalar search; difficulty[item_ndx] is left at
                # fminbound's final evaluation point (presumably near the
                # optimum — the return value is deliberately ignored).
                fminbound(min_local_func, -4, 4)

                # Update the partial integral based on the new found values
                estimate_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                         discrimination[item_ndx, None],
                                                         the_sign[item_ndx, None])
                # update partial integral
                partial_int *= estimate_int

            # Converged once no difficulty moved by more than 1e-3.
            if(np.abs(previous_difficulty - difficulty).max() < 1e-3):
                break

        cost = integrate.fixed_quad(
            lambda x: partial_int, quad_start, quad_stop, n=quad_n)[0]
        return -np.log(cost).dot(counts)
コード例 #2
0
def ability_eap(dataset, difficulty, discrimination, options=None):
    """Estimates the abilities for dichotomous models.

    Estimates the ability parameters (theta) for dichotomous models via
    expected a posteriori likelihood estimation.

    Args:
        dataset: [n_items, n_participants] (2d Array) of measured responses
        difficulty: (1d Array) of difficulty parameters for each item
        discrimination: (1d Array) of discrimination parameters for each item
        options: dictionary with updates to default options

    Returns:
        abilities: (1d array) estimated abilities

    Options:
        * distribution: callable
        * quadrature_bounds: (float, float)
        * quadrature_n: int

    """
    options = validate_estimation_options(options)
    quad_start, quad_stop = options['quadrature_bounds']
    quad_n = options['quadrature_n']

    # Allow a scalar discrimination: broadcast it to every item.
    if np.atleast_1d(discrimination).size == 1:
        discrimination = np.full(dataset.shape[0],
                                 discrimination,
                                 dtype='float')

    the_sign = convert_responses_to_kernel_sign(dataset)

    theta = _get_quadrature_points(quad_n, quad_start, quad_stop)
    partial_int = _compute_partial_integral(theta, difficulty, discrimination,
                                            the_sign)

    # Weight by the input ability distribution (the prior over theta)
    partial_int *= options['distribution'](theta)

    # Compute the denominator (the posterior's normalizing constant);
    # `x` is unused because the grid is already baked into partial_int.
    denominator = integrate.fixed_quad(lambda x: partial_int,
                                       quad_start,
                                       quad_stop,
                                       n=quad_n)[0]

    # compute the numerator: E[theta] scaling; note partial_int is
    # modified in place, so the denominator had to be computed first.
    partial_int *= theta

    numerator = integrate.fixed_quad(lambda x: partial_int,
                                     quad_start,
                                     quad_stop,
                                     n=quad_n)[0]

    return numerator / denominator
コード例 #3
0
ファイル: mml_methods.py プロジェクト: xuek622/girth
            def min_func_local(estimate):
                """1-D cost in this item's discrimination.

                Writes the trial value into the closed-over arrays and
                returns the negative marginal log-likelihood.
                """
                discrimination[ndx] = estimate
                # Re-estimate this item's difficulty given the trial
                # discrimination (updates difficulty[ndx] in place).
                _mml_abstract(difficulty[ndx, None], scalar[ndx, None],
                              discrimination[ndx, None], theta, distribution, options)
                estimate_int = _compute_partial_integral(theta, difficulty[ndx, None],
                                                         discrimination[ndx, None],
                                                         the_sign[ndx, None])

                # Combine with the closed-over partial_int (presumably the
                # likelihood with this item's term removed — see the
                # enclosing loop, not visible in this snippet).
                estimate_int *= partial_int
                # `x` is unused: the grid is already baked into estimate_int.
                otpt = integrate.fixed_quad(
                    lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]
                return -np.log(otpt).dot(counts)
コード例 #4
0
ファイル: mml_full_methods.py プロジェクト: xuek622/girth
                def min_local_func(beta_estimate):
                    """1-D cost in this item's difficulty.

                    Writes the trial beta into the closed-over array and
                    returns the negative marginal log-likelihood.
                    """
                    difficulty[item_ndx] = beta_estimate

                    # Likelihood term for this item alone, evaluated on the
                    # quadrature grid.
                    estimate_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                             discrimination[item_ndx, None],
                                                             the_sign[item_ndx, None])

                    # Combine with the closed-over partial_int (presumably
                    # the product over all other items — defined in the
                    # enclosing scope, not visible in this snippet).
                    estimate_int *= partial_int

                    # `x` is unused: the grid is already baked into
                    # estimate_int; fixed_quad supplies only the weights.
                    otpt = integrate.fixed_quad(
                        lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]

                    return -np.log(otpt).dot(counts)
コード例 #5
0
ファイル: mml_methods.py プロジェクト: xuek622/girth
    def min_func(estimate):
        """Negative marginal log-likelihood for a shared discrimination."""
        # Fill the closed-over discrimination array with the trial value,
        # then refresh the implied difficulty estimates in place.
        discrimination[:] = estimate
        _mml_abstract(difficulty, scalar, discrimination, theta,
                      distribution, options)

        # Likelihood kernel over the quadrature grid, weighted by the
        # ability prior.
        kernel = _compute_partial_integral(theta, difficulty,
                                           discrimination, the_sign)
        kernel *= distribution

        # Integrate over ability; the lambda ignores its argument because
        # the grid is already baked into the kernel.
        marginal = integrate.fixed_quad(lambda _x: kernel, quad_start,
                                        quad_stop, n=quad_n)[0]
        return -(np.log(marginal).dot(counts))
コード例 #6
0
ファイル: mml_methods.py プロジェクト: eribean/girth
    def min_func(estimate):
        """Negative marginal log-likelihood for a shared discrimination."""
        # Write the trial value into the closed-over array, then refresh
        # the implied difficulty estimates in place.
        discrimination[:] = estimate
        _mml_abstract(difficulty, scalar, discrimination, theta,
                      distribution_x_weights)

        # Accumulate the per-item likelihood terms over the quadrature grid.
        likelihood = np.ones((unique_sets.shape[1], theta.size))
        for item in range(n_items):
            likelihood *= _compute_partial_integral(
                theta, difficulty[item], discrimination[item],
                unique_sets[item], invalid_response_mask[item])
        likelihood *= distribution_x_weights

        # Marginalize over ability (quadrature weights already folded in).
        marginal = likelihood.sum(axis=1)
        return -(np.log(marginal) @ counts)
コード例 #7
0
ファイル: test_utils.py プロジェクト: eribean/girth
    def test_partial_integration_single(self):
        """Tests the integration quadrature function."""

        # Fixed seed so the random response pattern is reproducible.
        np.random.seed(154)

        alpha = 1.32
        beta = .67
        responses = np.random.randint(low=0, high=2, size=(1, 10))

        nodes, _ = _get_quadrature_points(61, -6, 6)

        result = _compute_partial_integral(
            nodes, beta, alpha, responses[0],
            np.zeros_like(responses, dtype='bool')[0])

        # Closed-form 2PL probabilities: sign flips with the response.
        signed_alpha = alpha * np.power(-1, responses)
        expected = 1.0 / (1 +
                          np.exp(np.outer(signed_alpha,
                                          (nodes - beta))))
        np.testing.assert_allclose(result, expected)
コード例 #8
0
ファイル: test_utils.py プロジェクト: xuek622/girth
    def test_partial_integration_array(self):
        """Tests the integration quadrature function on array."""

        # Fixed seed so the random draws are reproducible; call order of
        # the RNG below must stay as-is.
        np.random.seed(121)

        alphas = np.random.rand(5) + 0.5
        betas = np.linspace(-1.3, 1.3, 5)
        signs = (-1)**np.random.randint(low=0, high=2, size=(5, 1))

        nodes = _get_quadrature_points(61, -6, 6)
        kernel = _compute_partial_integral(nodes, betas, alphas, signs)

        result = integrate.fixed_quad(lambda x: kernel, -6, 6, n=61)[0]

        # Brute-force comparison: dense Riemann sum of the product of the
        # item response curves.
        signed_alphas = alphas * signs.squeeze() * -1
        grid = np.linspace(-6, 6, 5001)
        curves = irt_evaluation(betas, signed_alphas, grid)
        curves = curves.prod(axis=0)
        expected = curves.sum() * 12 / 5001

        self.assertAlmostEqual(result, expected.sum(), places=3)
コード例 #9
0
ファイル: mml_full_methods.py プロジェクト: xuek622/girth
def twopl_full(dataset, options=None):
    """ Estimates parameters in a 2PL IRT model.

    Please use twopl_mml instead.

    Args:
        dataset: [items x participants] matrix of True/False Values
        options: dictionary with updates to default options

    Returns:
        discrimination: (1d array) estimates of item discrimination
        difficulty: (1d array) estimates of item difficulties

    Options:
        * max_iteration: int
        * distribution: callable
        * quadrature_bounds: (float, float)
        * quadrature_n: int
"""
    options = validate_estimation_options(options)
    quad_start, quad_stop = options['quadrature_bounds']
    quad_n = options['quadrature_n']

    n_items = dataset.shape[0]
    # Collapse identical response patterns; counts weight the likelihood.
    unique_sets, counts = np.unique(dataset, axis=1, return_counts=True)
    the_sign = convert_responses_to_kernel_sign(unique_sets)

    theta = _get_quadrature_points(quad_n, quad_start, quad_stop)
    distribution = options['distribution'](theta)

    # Starting values: unit discrimination, zero difficulty.
    discrimination = np.ones((n_items,))
    difficulty = np.zeros((n_items,))

    for iteration in range(options['max_iteration']):
        previous_discrimination = discrimination.copy()

        # Quadrature evaluation for values that do not change
        partial_int = _compute_partial_integral(theta, difficulty,
                                                discrimination, the_sign)
        partial_int *= distribution

        for item_ndx in range(n_items):
            # pylint: disable=cell-var-from-loop
            # Remove this item's own contribution so it can be re-optimized.
            local_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                  discrimination[item_ndx, None],
                                                  the_sign[item_ndx, None])

            partial_int /= local_int

            def min_func_local(estimate):
                # Joint 2-D cost: estimate = [discrimination, difficulty],
                # written into the closed-over arrays in place.
                discrimination[item_ndx] = estimate[0]
                difficulty[item_ndx] = estimate[1]

                estimate_int = _compute_partial_integral(theta,
                                                         difficulty[item_ndx, None],
                                                         discrimination[item_ndx, None],
                                                         the_sign[item_ndx, None])

                estimate_int *= partial_int
                # `x` is unused: the grid is already baked into estimate_int.
                otpt = integrate.fixed_quad(
                    lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]

                return -np.log(otpt).dot(counts)

            # Two parameter solver that doesn't need derivatives
            initial_guess = np.concatenate((discrimination[item_ndx, None],
                                            difficulty[item_ndx, None]))
            fmin_slsqp(min_func_local, initial_guess, disp=False,
                       bounds=[(0.25, 4), (-4, 4)])

            # Update the partial integral based on the new found values
            estimate_int = _compute_partial_integral(theta, difficulty[item_ndx, None],
                                                     discrimination[item_ndx, None],
                                                     the_sign[item_ndx, None])
            # update partial integral
            partial_int *= estimate_int

        # Converged once no discrimination moved by more than 1e-3.
        if(np.abs(discrimination - previous_discrimination).max() < 1e-3):
            break

    return discrimination, difficulty
コード例 #10
0
ファイル: mml_methods.py プロジェクト: xuek622/girth
def twopl_mml(dataset, options=None):
    """ Estimates parameters in a 2PL IRT model.

    Args:
        dataset: [items x participants] matrix of True/False Values
        options: dictionary with updates to default options

    Returns:
        discrimination: (1d array) estimate of item discriminations
        difficulty: (1d array) estimates of item difficulties

    Options:
        * max_iteration: int
        * distribution: callable
        * quadrature_bounds: (float, float)
        * quadrature_n: int
    """
    options = validate_estimation_options(options)
    quad_start, quad_stop = options['quadrature_bounds']
    quad_n = options['quadrature_n']

    n_items = dataset.shape[0]
    n_no, n_yes = get_true_false_counts(dataset)
    # Per-item proportion of positive responses, used by _mml_abstract.
    scalar = n_yes / (n_yes + n_no)

    # Collapse identical response patterns; counts weight the likelihood.
    unique_sets, counts = np.unique(dataset, axis=1, return_counts=True)
    the_sign = convert_responses_to_kernel_sign(unique_sets)

    theta = _get_quadrature_points(quad_n, quad_start, quad_stop)
    distribution = options['distribution'](theta)

    # Perform the minimization
    discrimination = np.ones((n_items,))
    difficulty = np.zeros((n_items,))

    # BUGFIX: the iteration cap is 'max_iteration', not 'quadrature_n'
    # (matches twopl_full and the Options documented above).
    for iteration in range(options['max_iteration']):
        previous_discrimination = discrimination.copy()

        # Quadrature evaluation for values that do not change
        # This is done during the outer loop to address rounding errors
        partial_int = _compute_partial_integral(theta, difficulty,
                                                discrimination, the_sign)
        partial_int *= distribution

        for ndx in range(n_items):
            # pylint: disable=cell-var-from-loop

            # remove contribution from current item
            local_int = _compute_partial_integral(theta, difficulty[ndx, None],
                                                  discrimination[ndx, None], the_sign[ndx, None])

            partial_int /= local_int

            def min_func_local(estimate):
                # 1-D cost in this item's discrimination; the matching
                # difficulty is re-solved in place by _mml_abstract.
                discrimination[ndx] = estimate
                _mml_abstract(difficulty[ndx, None], scalar[ndx, None],
                              discrimination[ndx, None], theta, distribution, options)
                estimate_int = _compute_partial_integral(theta, difficulty[ndx, None],
                                                         discrimination[ndx, None],
                                                         the_sign[ndx, None])

                estimate_int *= partial_int
                # `x` is unused: the grid is already baked into estimate_int.
                otpt = integrate.fixed_quad(
                    lambda x: estimate_int, quad_start, quad_stop, n=quad_n)[0]
                return -np.log(otpt).dot(counts)

            # Solve for the discrimination parameters
            fminbound(min_func_local, 0.25, 6)

            # Update the partial integral based on the new found values
            estimate_int = _compute_partial_integral(theta, difficulty[ndx, None],
                                                     discrimination[ndx, None],
                                                     the_sign[ndx, None])
            # update partial integral
            partial_int *= estimate_int

        # Converged once no discrimination moved by more than 1e-3.
        if np.abs(discrimination - previous_discrimination).max() < 1e-3:
            break

    return discrimination, difficulty