Example #1
def _graded_partial_integral(theta, betas, betas_roll, discrimination,
                             responses):
    """Computes the partial integral for the graded response."""
    graded_prob = (irt_evaluation(betas, discrimination, theta) -
                   irt_evaluation(betas_roll, discrimination, theta))

    # TODO: Potential chunking for memory-limited systems
    return graded_prob[responses, :].prod(axis=0)
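A minimal, self-contained sketch of how `_graded_partial_integral` can be exercised, assuming the standard 2PL form of `irt_evaluation` that the test in Example #4 checks for; the ±inf boundary sentinels and the row-index encoding of `responses` are illustrative assumptions, not the library's own construction.

import numpy as np

def irt_evaluation(difficulty, discrimination, theta):
    # Stand-in for the library's irt_evaluation, assumed to be the standard
    # 2PL form implied by the test in Example #4; included only so the
    # sketches in this listing run on their own.
    difficulty = np.asarray(difficulty, dtype=float)
    if np.ndim(discrimination) < 1:
        discrimination = np.full(difficulty.shape[0], discrimination,
                                 dtype=float)
    kernel = discrimination[:, None] * (difficulty[:, None] - theta)
    return 1.0 / (1.0 + np.exp(kernel))

# Two hypothetical items: one with three response levels, one with two.  Each
# item's block of boundaries is bracketed with -inf / +inf so that its first
# row yields P(level 0) and its last row yields P(top level).
theta_grid = np.linspace(-5, 5, 41)
betas = np.array([-np.inf, -1.0, 0.5, -np.inf, 0.0])
betas_roll = np.array([-1.0, 0.5, np.inf, 0.0, np.inf])
discrimination = np.array([1.2, 1.2, 1.2, 0.8, 0.8])

# Responses encoded as row indices into the stacked boundary array:
# one column per respondent, one row per item (two respondents here).
responses = np.array([[1, 0],
                      [4, 3]])

likelihoods = _graded_partial_integral(theta_grid, betas, betas_roll,
                                       discrimination, responses)
print(likelihoods.shape)   # (2, 41): one likelihood curve per respondent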
Example #2
            def _theta_min(theta):
                # Solves for ability parameters (theta)
                graded_prob = (
                    irt_evaluation(betas, discrimination, theta) -
                    irt_evaluation(betas_roll, discrimination, theta))

                values = graded_prob[responses[:, ndx]]
                return -np.log(values).sum()
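Following on, the same objective pattern as `_theta_min` can be handed to a bounded scalar optimizer; `scipy.optimize.fminbound` and the [-6, 6] ability bounds are assumptions for this sketch, which reuses the toy arrays and the `irt_evaluation` stand-in introduced after Example #1.

from scipy.optimize import fminbound

def _theta_min_sketch(theta, person_rows):
    # Hypothetical single-respondent version of `_theta_min`, written against
    # the toy arrays from the sketch after Example #1.
    graded_prob = (irt_evaluation(betas, discrimination, np.array([theta])) -
                   irt_evaluation(betas_roll, discrimination, np.array([theta])))
    return -np.log(graded_prob[person_rows, 0]).sum()

# Ability estimate for the first toy respondent (column 0 of `responses`).
theta_hat = fminbound(_theta_min_sketch, -6, 6, args=(responses[:, 0],))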
Example #3
            def _alpha_beta_min(estimates):
                # Set the estimates into the parameter arrays
                discrimination[start_ndx:end_ndx] = estimates[0]
                betas[start_ndx + 1:end_ndx] = estimates[1:]
                betas_roll[start_ndx:end_ndx - 1] = estimates[1:]

                graded_prob = (
                    irt_evaluation(betas, discrimination, thetas) -
                    irt_evaluation(betas_roll, discrimination, thetas))

                values = np.take_along_axis(graded_prob,
                                            responses[None, ndx],
                                            axis=0).squeeze()
                np.clip(values, 1e-23, np.inf, out=values)
                return -np.log(values[valid_response_mask[ndx]]).sum()
Example #4
    def test_irt_evaluation_single_discrimination(self):
        """Testing the IRT evaluation method when discrimination is scalar."""
        difficulty = np.array([-1, 1])
        theta = np.array([1., 2.])
        discrimination = 4.0

        # Expected output
        expected_output = 1.0 / \
            (1.0 + np.exp(discrimination * (difficulty[:, None] - theta)))
        output = irt_evaluation(difficulty, discrimination, theta)

        np.testing.assert_allclose(output, expected_output)
Example #5
def _graded_func(difficulty, discrimination, thetas, output):
    """
    Private function to compute the probabilities for
    the graded response model.  This is done in place
    and does not return anything.
    """
    # This model is based on the difference of standard
    # logistic functions.

    # Do first level
    output[0] = 1.0 - irt_evaluation(np.array([difficulty[0]]),
                                     discrimination, thetas)

    for level_ndx in range(1, output.shape[0]-1):
        right = irt_evaluation(np.array([difficulty[level_ndx]]),
                               discrimination, thetas)
        left = irt_evaluation(np.array([difficulty[level_ndx-1]]),
                              discrimination, thetas)
        output[level_ndx] = left - right

    # Do last level
    output[-1] = irt_evaluation(np.array([difficulty[-1]]),
                                discrimination, thetas)
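A quick check of `_graded_func`, again relying on the `irt_evaluation` stand-in from the sketch after Example #1: for one hypothetical item the category probabilities written into `output` should sum to one at every ability value, since the levels are differences of the same logistic curves.

boundaries = np.array([-1.0, 0.2, 1.1])              # 3 boundaries -> 4 levels
ability_grid = np.linspace(-3, 3, 7)
probs = np.zeros((boundaries.size + 1, ability_grid.size))

_graded_func(boundaries, 1.4, ability_grid, probs)   # filled in place

# The telescoping difference-of-logistics construction partitions the
# probability mass across the response levels.
assert np.allclose(probs.sum(axis=0), 1.0)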
Example #6
def create_synthetic_irt_dichotomous(difficulty,
                                     discrimination,
                                     thetas,
                                     guessing=0,
                                     seed=None):
    """ Creates dichotomous unidimensional synthetic IRT data.

    Creates synthetic IRT data to test parameter estimation functions.
    Only for use with dichotomous outputs.

    Assumes the model
        P(theta) = 1.0 / (1 + exp(-discrimination * (theta - difficulty)))

    Args:
        difficulty: [array] of difficulty parameters
        discrimination:  [array | number] of discrimination parameters
        thetas: [array] of person abilities
        guessing: [array | number] of guessing parameters associated with items
        seed: Optional setting to reproduce results

    Returns:
        synthetic_data: (2d array) realization of possible responses given parameters

    """
    if seed is not None:
        np.random.seed(seed)

    if np.ndim(guessing) < 1:
        guessing = np.full_like(difficulty, guessing)

    continuous_output = irt_evaluation(difficulty, discrimination, thetas)

    # Add guessing parameters
    continuous_output *= (1.0 - guessing[:, None])
    continuous_output += guessing[:, None]

    # convert to binary based on probability
    random_compare = np.random.rand(*continuous_output.shape)

    return (random_compare <= continuous_output).astype('int')
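A hypothetical call to `create_synthetic_irt_dichotomous`, once more with the `irt_evaluation` stand-in in scope; the item, ability, and guessing values below are made up for illustration.

difficulty = np.linspace(-1.5, 1.5, 10)                    # 10 items
person_abilities = np.random.default_rng(7).standard_normal(250)

synthetic = create_synthetic_irt_dichotomous(difficulty, 1.7, person_abilities,
                                             guessing=0.1, seed=42)
print(synthetic.shape)   # (10, 250) matrix of 0/1 item responses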
Example #7
    def test_partial_integration_array(self):
        """Tests the integration quadrature function on array."""

        # Set seed for repeatability
        np.random.seed(121)

        discrimination = np.random.rand(5) + 0.5
        difficulty = np.linspace(-1.3, 1.3, 5)
        the_sign = (-1)**np.random.randint(low=0, high=2, size=(5, 1))

        quad_points = _get_quadrature_points(61, -6, 6)
        dataset = _compute_partial_integral(quad_points, difficulty, discrimination,
                                            the_sign)

        value = integrate.fixed_quad(lambda x: dataset, -6, 6, n=61)[0]

        signed_discrimination = discrimination * the_sign.squeeze() * -1
        xx = np.linspace(-6, 6, 5001)
        yy = irt_evaluation(difficulty, signed_discrimination, xx)
        yy = yy.prod(axis=0)
        expected = yy.sum() * 12 / 5001

        self.assertAlmostEqual(value, expected, places=3)