Example #1
0
    def test_solve_with_recorder(self):
        """Runs the refinement solver with a run recorder attached and checks
        that theta and IRF snapshots are recorded at the expected iterations."""
        num_persons, num_items, num_dims = 100, 20, 1
        # 2-PL model: fixed unit discrimination, no guessing asymptote.
        X, theta, b, c = sim.generate_dichotomous_responses(
            num_persons, num_items, num_dims, asymptote=0, discrimination=1)

        recorder = nirt.run_recorder.RunRecorder()
        solver = nirt.solver_refinement.SolverRefinement(X, c, recorder=recorder)
        theta_approx = solver.solve()

        # Theta snapshots: a single entry at iteration 0, two entries at each
        # subsequent refinement level (4, 8, 16 bins).
        assert recorder.theta.keys() == {0, 4, 8, 16}, \
            "unexpected recorder theta key set {}".format(recorder.theta.keys())
        assert len(recorder.theta[0]) == 1
        assert all(
            len(snapshots) == 2
            for level, snapshots in recorder.theta.items() if level != 0)

        # IRF snapshots start only once refinement begins (no iteration-0 entry).
        assert recorder.irf.keys() == {4, 8, 16}, \
            "unexpected recorder IRF key set {}".format(recorder.irf.keys())
        assert all(
            len(snapshots) == 2
            for level, snapshots in recorder.irf.items() if level != 0)
Example #2
0
    def test_generate_continuous_responses(self):
        """Checks output array shapes when generating continuous
        (non-dichotomous) responses from the simulator."""
        np.random.seed(0)

        num_persons, num_items, num_dims = 100, 20, 1
        # 2-PL model: fixed unit discrimination, no guessing asymptote.
        x, theta, b, c = sim.generate_dichotomous_responses(
            num_persons,
            num_items,
            num_dims,
            asymptote=0,
            discrimination=1,
            dichotomous=False)

        assert x.shape == (num_persons, num_items)
        assert theta.shape == (num_persons, num_dims)
        assert b.shape == (num_items, )
        assert c.shape == (num_items, )
Example #3
0
    def test_initial_guess(self):
        """Verifies that the histograms built from the initial theta guess approximate the exact IRFs to a reasonable
        degree.

        The mean IRF approximation error is also expected to decrease as the
        sample size P grows (checked against hard-coded reference values).
        """
        # Number of items.
        I = 20
        # Number of latent ability dimensions (sub-scales).
        C = 1
        # No guessing asymptote; fixed unit discrimination (2-PL model).
        asym = 0
        discrimination = 1
        # Exact IRFs.
        # NOTE: these lambdas close over 'b', which is only assigned later inside
        # the experiment loop below; each call therefore evaluates against the most
        # recently generated 'b'. Only 'i' is bound early (via the default arg) to
        # avoid the late-binding-closure pitfall.
        model_irf = [
            lambda t, i=i: nirt.simulate.simulate_data.three_pl_model(
                t, discrimination, b[i], asym) for i in range(I)
        ]

        num_bins = 5
        method = "uniform-fixed"  #"quantile"  # "uniform"

        # Check that the norm of the difference between the approximate and exact IRF at the nodes is small enough for
        # all items, and that this error decreases when the sample size increases.
        num_p = 5
        num_experiments = 10
        e_mean = [0] * num_p
        # Sample sizes P = 100, 400, 1600, 6400, 25600.
        for k, P in enumerate(100 * 4**np.arange(num_p)):
            e = [0] * num_experiments
            for experiment in range(num_experiments):
                X, theta, b, c = \
                    sim.generate_dichotomous_responses(P, I, C, asymptote=asym, discrimination=discrimination)

                # Initial guess for thetas.
                t = nirt.likelihood.initial_guess(X, c)
                # Build IRFs.
                # Pad the grid range by 2 on each side of the observed theta range.
                xlim = [(min(t[:, ci]) - 2, max(t[:, ci]) + 2)
                        for ci in range(C)]
                grid = [
                    nirt.grid.create_grid(t[:, ci],
                                          num_bins,
                                          method=method,
                                          xlim=xlim[ci]) for ci in range(C)
                ]
                # One IRF per item, built on the grid of the item's sub-scale c[i].
                irf = [
                    nirt.irf.ItemResponseFunction(grid[c[i]], X[:, i])
                    for i in range(I)
                ]

                # Calculate the scaled, weighted L2 norm of the error in the approximate IRF at the nodes.
                # Weight = bin count (so that all persons contribute the same weight to the norm: more
                # dense bins should count more).
                error = nirt.error.error_norm_by_item(model_irf, irf)
                #print("P {} {:.3f} +- {:.3f}".format(P, error.mean(), error.std()))
                e[experiment] = error.mean()
            # Average over the independent experiments for this sample size.
            e_mean[k] = np.mean(e)
        # Reference values: error decreases monotonically with sample size.
        assert_array_almost_equal(
            e_mean, [0.06139, 0.03444, 0.02465, 0.01991, 0.01670], 5)
 def test_solve_multidimensional_theta(self):
     """Solves for a multidimensional (C = 5) latent ability with the sampled
     simulated-annealing solver and checks the estimate's shape against the
     ground truth.
     """
     # Number of persons.
     P = 100
     # Number of items.
     I = 20
     # Number of latent ability dimensions (sub-scales).
     C = 5
     # Using 2-PL model with fixed discrimination and no asymptote for all items.
     asym = 0
     discrimination = 1
     X, theta, b, c = sim.generate_dichotomous_responses(P, I, C, asymptote=asym, discrimination=discrimination)
     solver = nirt.solver_sampled_simulated_annealing.SolverSampledSimulatedAnnealing(X, c)
     # BUG FIX: the original overwrote the ground-truth 'theta' with the estimate
     # and asserted 'theta == 0', which raises ValueError for a size > 1 numpy
     # array (ambiguous truth value) and could never hold for real data anyway.
     # Compare shapes instead, consistent with test_solve_unidimensional_theta.
     theta_approx = solver.solve()
     assert theta_approx.shape == theta.shape
Example #5
0
    def test_solve_unidimensional_theta(self):
        """Runs the MCMC solver on a one-dimensional latent trait and checks
        that the estimate has the same shape as the ground truth."""
        num_persons, num_items, num_dims = 100, 20, 1
        # 2-PL model: fixed unit discrimination, no guessing asymptote.
        X, theta, b, c = sim.generate_dichotomous_responses(
            num_persons, num_items, num_dims, asymptote=0, discrimination=1)

        mcmc_solver = nirt.solver_mcmc.SolverMcmc(X, c)
        theta_approx = mcmc_solver.solve()

        assert theta_approx.shape == theta.shape
Example #6
0
    def setUp(self) -> None:
        """Resets logging and RNG state and generates a fixed synthetic data set."""
        # Start each test from a clean logging configuration so basicConfig
        # below takes effect even if handlers already exist.
        while logging.root.handlers:
            logging.root.removeHandler(logging.root.handlers[0])
        logging.basicConfig(level=logging.WARN,
                            format="%(levelname)-8s %(message)s",
                            datefmt="%a, %d %b %Y %H:%M:%S")

        np.random.seed(0)

        # Problem dimensions: persons x items, one latent sub-scale.
        self.P = 100
        self.I = 20
        self.C = 1
        # 2-PL model with fixed discrimination and no guessing asymptote.
        self.x, self.theta, self.b, self.c = sim.generate_dichotomous_responses(
            self.P, self.I, self.C, asymptote=0)
Example #7
0
if __name__ == "__main__":
    # Start from a clean logging configuration so basicConfig takes effect
    # even if handlers were installed earlier.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)-8s %(message)s",
                        datefmt="%a, %d %b %Y %H:%M:%S")

    # Generate synthetic data, uni-dimensional latent trait.
    np.random.seed(0)
    # Number of persons.
    P = 1000
    # Number of items.
    I = 20
    # Number of latent ability dimensions (sub-scales).
    C = 1
    # No guessing asymptote; fixed unit discrimination (2-PL model).
    asym = 0
    discrimination = 1
    X, theta, b, c = sim.generate_dichotomous_responses(
        P, I, C, asymptote=asym, discrimination=discrimination)

    # Algorithm parameters.
    # Aberration accentuation parameter.
    s = 2.0
    # Only probabilities up to this value are considered for aberration graph links.
    p_max = 0.3
    # A collusion group must correspond to at least this fraction of leaked items.
    min_item_fraction = 0.2

    # Use IRT to estimate the probability that a person gets an item right.
    p, irf = calculate_irf_proabilities(X, c)
    # Check IRF accuracy (since this is synthetic data we can calculate this here).
    # Exact model IRFs; 'b[i]' is bound per-lambda via the default argument to
    # avoid the late-binding-closure pitfall.
    # NOTE(review): this statement continues past the end of the visible chunk.
    model = [
        lambda x, b=b[i]: sim.three_pl_model(x, discrimination, b, asym)
        for i in range(I)