Example #1
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns


def compare_more_models(experiments, eval_data, labels=None, difficulties=True, runs=1):
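    """Pairwise-compare trained models and plot a 2x2 summary figure.

    ``experiments`` maps a label to a pair of factories:
    ``experiments[label][0](label)`` builds the data object and
    ``experiments[label][1](label)`` builds the model.
    Returns the label-by-label correlation matrix.
    """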
    labels = sorted(experiments.keys()) if labels is None else labels

    results = pd.DataFrame(index=labels, columns=labels, dtype=float)
    for label in labels:
        results.loc[label, label] = 1
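    # Fill the symmetric matrix of pairwise model correlations.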
    for i, label1 in enumerate(labels):
        for label2 in labels[i+1:]:
            print(label1, label2)
            for run in range(runs):
                data1 = experiments[label1][0](label1)
                data2 = experiments[label2][0](label2)
                data1.set_seed(run)
                data2.set_seed(run)
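                # Compare either per-item difficulties or raw predictions of the two models.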
                compare_fce = compare_model_difficulties if difficulties else compare_model_predictions
                c = compare_fce(data1, data2,
                                experiments[label1][1](label1), experiments[label2][1](label2), plot=False)
                results.loc[label2, label1] = c
                results.loc[label1, label2] = c

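    # Per-model RMSE collected from the Evaluator reports.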
    df = pd.DataFrame(columns=["labels", "rmse"])
    for label in labels:
        r = Evaluator(experiments[label][0](label), experiments[label][1](label)).get_report()
        df.loc[len(df)] = (label, r["rmse"])

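    # 2x2 summary: correlation heatmap, filtered comparison, RMSE and AUC comparisons.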
    plt.subplot(221)
    plt.title("Correlations of " + ("difficulties" if difficulties else "predictions"))
    sns.heatmap(results, vmax=1, vmin=.4)
    # plt.yticks(rotation=0)
    # plt.xticks(rotation=90)
    plt.subplot(222)
    compare_models(eval_data, [experiments[label][1](label) for label in labels], answer_filters={
        # "response >7s-0.5": transform_response_by_time(((7, 0.5),)),
        "long (30) students": filter_students_with_many_answers(number_of_answers=30),
    }, runs=runs, hue_order=False)
    plt.subplot(223)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   metric="rmse", force_evaluate=False, answer_filters={
            "binary": response_as_binary(),
            "response >7s-0.5": transform_response_by_time(((7, 0.5),), binarize_before=True),
        }, runs=runs, hue_order=False)

    plt.subplot(224)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   metric="AUC", force_evaluate=False, runs=runs, hue_order=False)

    return results
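

# A minimal usage sketch (hypothetical labels and factories; assumes the dataset `d`
# and the model classes used below are available in this script):
# compare_more_models(
#     {"item-avg": (lambda label: d, lambda label: ItemAvgModel()),
#      "elo": (lambda label: d, lambda label: EloPriorCurrentModel(KC=2, KI=0.5))},
#     eval_data=d, difficulties=True, runs=3)
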
compare_models(d, [  # assumed call head: `d` is the dataset object used elsewhere in this script
    # AvgModel(),
    ItemAvgModel(),
    SkipHandler(ItemAvgModel()),
    # EloPriorCurrentModel(),
    EloPriorCurrentModel(KC=2, KI=0.5),
    SkipHandler(EloPriorCurrentModel(KC=2, KI=0.5)),
    # EloHierarchicalModel(),
    # EloHierarchicalModel(KC=1, KI=0.75),
    EloConcepts(concepts=concepts),
    SkipHandler(EloConcepts(concepts=concepts)),
    EloHierarchicalModel(KC=1, KI=0.75, alpha=0.8, beta=0.02),
    SkipHandler(EloHierarchicalModel(KC=1, KI=0.75, alpha=0.8, beta=0.02)),
    # EloHierarchicalModel(alpha=0.25, beta=0.02),
    # EloConcepts(),
], dont=0, force_evaluate=0, force_run=0, runs=5, hue_order=False, answer_filters={
    "long (50) student": data.filter_students_with_many_answers(),
    "long (30) student": data.filter_students_with_many_answers(number_of_answers=30),
    "long (11) student": data.filter_students_with_many_answers(number_of_answers=11),
    "response >5s-0.5": data.transform_response_by_time(((5, 0.5),))
},
    # palette=sns.color_palette()[:2] * 4
)



# evaluator.Evaluator(d, EloHierarchicalModel(alpha=0.25, beta=0.02)).brier_graphs()
# evaluator.Evaluator(d, EloPriorCurrentModel()).brier_graphs()
# evaluator.Evaluator(d, ItemAvgModel()).brier_graphs()

if 0:
    utils.grid_search(d, EloHierarchicalModel,