Example #1
    def test_adaptive_test(self):
        random.seed(123)
        np.random.seed(123)
        theta_estimate = 0
        actual_theta = [0, 1]
        items = simulate_items(difficulty={
            'mean': 0,
            'sd': 1
        },
                               discrimination={
                                   'mean': 1,
                                   'sd': .05
                               },
                               guessing=None,
                               item_count=500)

        probabilities, response_vector = item_vectors(items=items,
                                                      abilities=actual_theta)

        # attach the first simulated examinee's responses to the item bank
        items['correct'] = response_vector.iloc[0].tolist()
        items_taken = []
        response_list = []
        items = pd.DataFrame(items)
        # simulate exam

        for _ in range(40):
            # pick the most informative remaining item at the current theta
            remaining = items_remaining(items, items_taken)
            remaining_items, next_item = select_next_item(
                remaining, theta_estimate)
            item = remaining_items.loc[next_item]
            a = np.array(item['a'])
            b = np.array(item['b'])
            # sample a response from the 2PL probability of a correct answer
            current_probabilities = _p_2pl(a, theta_estimate, b)
            rand_num = random.uniform(0, 1)
            if current_probabilities >= rand_num:
                response_list.append(1)
            else:
                response_list.append(0)
            items_taken.append(next_item)
            # rebuild the response record and re-estimate theta by maximum likelihood
            new_dataframe = pd.DataFrame(
                list(zip(items_taken, response_list)),
                columns=['new_index',
                         'corrects']).set_index('new_index',
                                                drop=True).sort_index()
            new_dataframe.index.name = None  # `del` on index.name fails on modern pandas
            taken_items = new_dataframe.join(items)
            max_prob, theta_estimate = L(np.array(taken_items['correct']),
                                         np.array(taken_items['a']),
                                         np.array(taken_items['b']))

        self.assertAlmostEqual(theta_estimate, -0.84000000000006736)
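The adaptive loop above hinges on `_p_2pl`, whose implementation isn't shown in this snippet. A minimal sketch, assuming it follows the standard two-parameter logistic (2PL) item response function with discrimination a and difficulty b:

import numpy as np

def _p_2pl(a, theta, b):
    # Assumed standard 2PL form (not necessarily the library's exact code):
    #   P(correct) = 1 / (1 + exp(-a * (theta - b)))
    return 1.0 / (1.0 + np.exp(-a * (theta - b)))

# An examinee at theta = 0 facing an item of average difficulty (b = 0)
# has a 50% chance of success, whatever the discrimination a.
print(_p_2pl(a=1.0, theta=0.0, b=0.0))  # 0.5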
Example #2
    def test_CTT_methods(self):

        random.seed(123)
        np.random.seed(123)

        items = simulate_items()
        people = simulate_people(100, {'mean': 0, 'sd': 1})
        prob_vector, response_vector = item_vectors(items, people)

        # classical test theory statistics: Cronbach's alpha, item p-values
        # (proportion correct), item discrimination, and examinee total scores
        alphas = calculate_alpha(response_vector)
        p_values = get_p_values(response_vector)
        discrim, discrim2 = discrimination_index(response_vector)
        examinee_scores = examinee_score(response_vector)
        self.assertAlmostEqual(alphas, 0.894933894194553)
        self.assertEqual(p_values.iloc[0, 0], 0.67)
        self.assertAlmostEqual(discrim[0], 0.4401980052948809)
        self.assertEqual(examinee_scores[0], 15)
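`calculate_alpha` presumably computes Cronbach's alpha. A minimal sketch of that statistic over a 0/1 response matrix (rows = examinees, columns = items); this is the textbook formula, not necessarily the library's own code:

import pandas as pd

def cronbach_alpha(responses: pd.DataFrame) -> float:
    # alpha = k / (k - 1) * (1 - sum(item variances) / variance(total scores))
    k = responses.shape[1]
    item_variances = responses.var(axis=0, ddof=1).sum()
    total_variance = responses.sum(axis=1).var(ddof=1)
    return (k / (k - 1)) * (1 - item_variances / total_variance)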
Example #3
    def test_response_vector(self):
        np.random.seed(123)
        random.seed(123)
        thetas = [0, 1]
        items = simulate_items(difficulty={
            'mean': 0,
            'sd': 1
        },
                               discrimination={
                                   'mean': 1,
                                   'sd': .05
                               },
                               guessing=None,
                               item_count=3)
        expected = pd.DataFrame(data={0: [1, 1], 1: [1, 0], 2: [1, 1]})
        probabilities, response_vector = item_vectors(items=items,
                                                      abilities=thetas)
        self.assertEqual(response_vector[0].tolist(), expected[0].tolist())
Example #4
    def test_item(self):
        np.random.seed(123)
        items = simulate_items(difficulty={
            'mean': 0,
            'sd': 1
        },
                               discrimination={
                                   'mean': 1,
                                   'sd': .05
                               },
                               guessing=None,
                               item_count=3)
        discrim_vector = items['a'].tolist()
        difficulty_vector = items['b'].tolist()
        self.assertEqual(
            discrim_vector,
            [0.9246852643040954, 0.9710699874015731, 1.0825718268548576])
        self.assertEqual(
            difficulty_vector,
            [-1.0856306033005612, 0.9973454465835858, 0.28297849805199204])
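The asserted values are consistent with `simulate_items` drawing item parameters from the requested normal distributions under seed 123. A hypothetical stand-in, purely to illustrate the assumed behavior (the real function also handles guessing and defaults not shown here):

import numpy as np
import pandas as pd

def simulate_items_sketch(difficulty, discrimination, item_count):
    # Assumed behavior: draw discrimination 'a' and difficulty 'b'
    # from the requested normal distributions.
    return pd.DataFrame({
        'a': np.random.normal(discrimination['mean'], discrimination['sd'], item_count),
        'b': np.random.normal(difficulty['mean'], difficulty['sd'], item_count),
    })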
Example #5
        df1 = df1.reset_index()
        df2 = items.drop(df1.index)
        df1 = df1.drop(['index'], axis=1)  # drop() returns a copy; reassign it
        df1 = examinee_score(df1)
        df2 = examinee_score(df2)
        # pearsonr returns (r, p-value); keep only the correlation
        reliability, _ = pearsonr(df1, df2)

    else:
        raise ValueError("please select a valid split")

    return reliability


def calculate_sem(items, reliability=None):
    # classical standard error of measurement:
    #   SEM = SD(total scores) * sqrt(1 - reliability)
    if reliability is None:
        reliability = calculate_alpha(items)
    total_scores = examinee_score(items)
    std = np.std(np.array(total_scores))
    return std * math.sqrt(1 - reliability)


items = simulate_items()
people = simulate_people(100, {'mean': 0, 'sd': 1})
prob_vector, response_vector = item_vectors(items, people)

print(calculate_sem(response_vector))

# TODO: calculate SEM using IRT item stats
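The SEM formula is easy to sanity-check by hand. A quick worked example with made-up numbers (score SD = 4, reliability = 0.89):

import math

# SEM = SD * sqrt(1 - reliability)
print(4 * math.sqrt(1 - 0.89))  # ~1.327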
Example #6
    # mean equating: shift the second test's scores by the difference in means
    initial_scores = examinee_score(subsequent_data)
    subsequent_scores = examinee_score(subsequent_data) + score_difference

    equating_dict = {
        'initial_scores': initial_scores,
        'mean_equated_scores': subsequent_scores,
        'mean_sd_equated_scores': None
    }

    equating_df = pd.DataFrame(equating_dict)

    if sd_equating:
        # linear (mean-and-sigma) equating:
        #   y = (sd1/sd2) * x + (mean1 - (sd1/sd2) * mean2)
        sd_division = test1_info['score_sd'] / test2_info['score_sd']
        equating_df['mean_sd_equated_scores'] = (
            sd_division) * equating_df['initial_scores'] + (
                test1_info['average_score'] -
                sd_division * test2_info['average_score'])
        equating_dict = equating_df.to_dict()

    return equating_dict


items1 = simulate_items()
items2 = simulate_items(difficulty={'mean': 1, 'sd': 1})
people = simulate_people(100, {'mean': 0, 'sd': 1})
prob_vector1, response_vector1 = item_vectors(items1, people)
prob_vector2, response_vector2 = item_vectors(items2, people)
examinee_scores1 = examinee_score(response_vector1)
examinee_scores2 = examinee_score(response_vector2)
print(mean_equating(response_vector1, response_vector2, sd_equating=True))
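The `sd_equating` branch applies the classical mean-and-sigma linear equating transform. A standalone check with hypothetical test statistics (means 50 and 45, SDs 10 and 8), showing that a new-form score at its own mean maps onto the reference-form mean:

# y = (sd1/sd2) * x + (mean1 - (sd1/sd2) * mean2)
mean1, sd1 = 50, 10   # hypothetical reference-form mean and SD
mean2, sd2 = 45, 8    # hypothetical new-form mean and SD
x = 45                # a new-form score at its own mean...
y = (sd1 / sd2) * x + (mean1 - (sd1 / sd2) * mean2)
print(y)  # 50.0 -- ...lands on the reference-form mean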