Example #1
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Project-specific helpers (Evaluator, compare_models, response_as_binary,
# transform_response_by_time_linear) are assumed to be importable from the
# surrounding codebase.


def compare_more_models_final(experiments, eval_data, labels=None, difficulties=True, runs=1):
    labels = sorted(experiments.keys()) if labels is None else labels

    # Per-model RMSE report; computed for inspection, not plotted below.
    df = pd.DataFrame(columns=["labels", "rmse"])
    for label in labels:
        r = Evaluator(experiments[label][0](label), experiments[label][1](label)).get_report()
        df.loc[len(df)] = (label, r["rmse"])

    # RMSE with responses converted to binary.
    plt.subplot(131)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   palette=sns.color_palette()[:4],
                   metric="rmse", force_evaluate=False,
                   answer_filters={
                       "binary": response_as_binary(),
                   },
                   runs=runs, hue_order=False, with_all=False)

    # RMSE with responses transformed linearly by response time (parameter 14).
    plt.subplot(132)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   palette=sns.color_palette()[:4],
                   metric="rmse", force_evaluate=False,
                   answer_filters={
                       # "response >7s-0.5": transform_response_by_time(((7, 0.5),), binarize_before=True),
                       "linear 14": transform_response_by_time_linear(14),
                   },
                   runs=runs, hue_order=False, with_all=False)

    # AUC on the raw responses.
    plt.subplot(133)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   palette=sns.color_palette()[:4],
                   metric="AUC", force_evaluate=False, runs=runs, hue_order=False)
Example #2
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Project-specific helpers (Evaluator, compare_models, compare_model_difficulties,
# compare_model_predictions, response_as_binary, transform_response_by_time,
# filter_students_with_many_answers) are assumed to be importable from the
# surrounding codebase.


def compare_more_models(experiments, eval_data, labels=None, difficulties=True, runs=1):
    labels = sorted(experiments.keys()) if labels is None else labels

    # Pairwise correlation matrix between models (of difficulties or of predictions).
    results = pd.DataFrame(index=labels, columns=labels, dtype=float)
    for label in labels:
        results.loc[label, label] = 1
    for i, label1 in enumerate(labels):
        for label2 in labels[i + 1:]:
            print(label1, label2)
            for run in range(runs):
                data1 = experiments[label1][0](label1)
                data2 = experiments[label2][0](label2)
                data1.set_seed(run)
                data2.set_seed(run)
                compare_fce = compare_model_difficulties if difficulties else compare_model_predictions
                c = compare_fce(data1, data2,
                                experiments[label1][1](label1), experiments[label2][1](label2), plot=False)
                results.loc[label2, label1] = c
                results.loc[label1, label2] = c

    # Per-model RMSE report; computed for inspection, not plotted below.
    df = pd.DataFrame(columns=["labels", "rmse"])
    for label in labels:
        r = Evaluator(experiments[label][0](label), experiments[label][1](label)).get_report()
        df.loc[len(df)] = (label, r["rmse"])

    # Heatmap of the pairwise correlations.
    plt.subplot(221)
    plt.title("Correlations of " + ("difficulties" if difficulties else "predictions"))
    sns.heatmap(results, vmax=1, vmin=0.4)
    # plt.yticks(rotation=0)
    # plt.xticks(rotation=90)

    # Evaluation restricted to students with many (30) answers.
    plt.subplot(222)
    compare_models(eval_data, [experiments[label][1](label) for label in labels],
                   answer_filters={
                       # "response >7s-0.5": transform_response_by_time(((7, 0.5),)),
                       "long (30) students": filter_students_with_many_answers(number_of_answers=30),
                   },
                   runs=runs, hue_order=False)

    # RMSE under binarized and time-transformed responses.
    plt.subplot(223)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   metric="rmse", force_evaluate=False,
                   answer_filters={
                       "binary": response_as_binary(),
                       "response >7s-0.5": transform_response_by_time(((7, 0.5),), binarize_before=True),
                   },
                   runs=runs, hue_order=False)

    # AUC on the raw responses.
    plt.subplot(224)
    compare_models([experiments[label][0](label) for label in labels],
                   [experiments[label][1](label) for label in labels],
                   names=labels,
                   metric="AUC", force_evaluate=False, runs=runs, hue_order=False)

    return results
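A usage sketch for this variant, under the same assumptions about the `experiments` mapping as above (the factory names remain hypothetical); the returned DataFrame is the symmetric matrix of pairwise correlations shown in the heatmap:

import matplotlib.pyplot as plt

plt.figure(figsize=(12, 10))
corr = compare_more_models(experiments, eval_data, labels=["model-A", "model-B"],
                           difficulties=True, runs=2)
plt.show()
print(corr.round(2))  # pairwise correlations of model difficulties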