# Example #1 (score: 0)
    def test_pfa_coef_counts(self):
        """pfa_coef_counts should turn the coefficient frame into a 3-row matrix:
        correct coefs, incorrect coefs, then intercepts (with a leading 1/0/1 column)."""
        frame = pd.DataFrame(
            data=[
                ("a", 0.11, 1.0, -1.0),
                ("b", 0.22, 2.0, -2.0),
            ],
            columns=["factor", "intercept", "correct_coef", "incorrect_coef"],
        )

        result = pfa_coef_counts(frame)

        want = array([
            [1.0, 1.0, 2.0],
            [0.0, -1.0, -2.0],
            [1.0, 0.11, 0.22],
        ])

        assert_equal(result, want)
# Example #2 (score: 0)
    def test_pfa_prediction_m__works_with_all_skills_active(self):
        """With every skill active, the prediction is the sum over skills of
        intercept + correct_count * correct_coef + incorrect_count * incorrect_coef."""
        frame = pd.DataFrame(
            data=[
                ("a", 9., 1., -1.),
                ("b", 7., 2., -2.),
            ],
            columns=["factor", "intercept", "correct_coef", "incorrect_coef"],
        )
        coef = pfa_coef_counts(frame)

        # Rows: correct counts, incorrect counts, active-skill indicators.
        data = array([
            [1., 12., 22.],
            [0., 11., 21.],
            [1., 1., 1.],
        ])

        actual = pfa_prediction_m(data, coef)

        skill_a = 9. + (12. * 1.) + (11. * -1.)
        skill_b = 7. + (22. * 2.) + (21. * -2.)
        assert_equal(actual, skill_a + skill_b)
# Example #3 (score: 0)
    def test_pfa_dashboard__using_diff(self):
        """When difficulty factors are present, the dashboard should equal the
        per-skill predictions produced by pred() for the selected difficulty."""
        coef = pfa_coef_counts(pd.DataFrame(
            data=[
                ("diff1", 6., 1.1, -1.1),
                ("diff2", 7., 1.1, -1.1),
                ("a", 8., 2., -2.),
                ("b", 9., 3., -3.),
                ("c", 10., 4., -4.),
            ],
            columns=["factor", "intercept", "correct_coef", "incorrect_coef"],
        ))

        data = array([
            [1., 12., 22., 32., 42., 52.],
            [0., 11., 21., 31., 41., 51.],
            [1., 1., 1., 1., 1., 1.],  # This row shouldn't matter
        ])

        actual = pfa_dashboard(
            data_counts=data,
            coef_counts=coef,
            num_diffs=2,
            num_skills=3,
            diff_ind=1,
        )

        expected = array([
            pred(data, coef, num_diffs=2, num_skills=3, diff_ind=1, skill_ind=s)
            for s in range(3)
        ])
        assert_equal(actual, expected)
# Widen numpy's print output so full history rows print without truncation;
# threshold is sized to the largest array we expect to dump.
# full_history_length / model_history_length / feature_num are defined earlier in the file.
np.set_printoptions(linewidth=200,
                    threshold=(full_history_length + 1) *
                    model_history_length *
                    feature_num)  # unset with np.set_printoptions()

# output location
# run_dir_load = os.path.join('runs', f'run_embedded_l1-{pred_model_layer_1}_l2-{pred_model_layer_2}_e{pred_epochs}')
run_dir = os.path.join('dashboards', 'pfa_dashboard', 'pick1')

# exist_ok avoids the exists()-then-makedirs() race and a crash on re-runs.
os.makedirs(run_dir, exist_ok=True)

# Mirror stdout into a log file inside the run directory.
stdout_add_file(os.path.join(run_dir, 'log.txt'))

# Precompute the per-count coefficient matrix once for all predictions below.
coef = pfa_coef_counts(pfa_coef())

# ================= VALIDATE ===============
print("loading validate data")
answer_counts_validate = read_numpy_3d_array_from_txt(
    os.path.join('outputs', 'pick1',
                 f'answer_counts_validate_l{full_history_length}_s1.txt'))
# ========= Dashboard and current question
print("computing dashboard")
answer_counts_validate_dashboard = [
    np.concatenate((pfa_dashboard(ac,
                                  coef,
                                  num_diffs=0,
                                  num_skills=diff_and_skill_num,
                                  diff_ind=-1), np.array(ac[2][1:])))
    for ac in answer_counts_validate