def get_explanation_error_df():
    """Compute SHAP explanation errors for each saved synthetic-pattern run.

    Loads the saved LASTS runs for the five synthetic pattern variants,
    fits a ShapTimeSeries explainer on each instance, and collects the
    explanation error (not divided by the baseline) per variant.

    Returns:
        pandas.DataFrame with one column per variant, named
        ``"shap_abs_lasts_synth_<pattern>"``.
    """
    shap_kwargs = {
        "nsamples": 1000,
        "background": "linear_consecutive",
        "pen": 1,
        "model": "rbf",
        "jump": 5,
        "plot": False,
        "figsize": (20, 3),
        "segments_size": None
    }

    # The five pattern folders share a common path layout.
    folders = ["pat", "pat0", "pat1", "pat2", "pat3"]
    lasts_lists = [
        load_multiple_lasts(
            "./saved/pat/" + folder + "/synth/matched_uniform_saxdt",
            simple_load=True)
        for folder in folders
    ]
    names = ["lasts_synth_" + folder for folder in folders]

    df = defaultdict(list)
    for name, lasts_list in zip(names, lasts_lists):
        total = len(lasts_list)
        for count, lasts_ in enumerate(lasts_list, start=1):
            print(count, "/", total)
            explainer = ShapTimeSeries()
            explainer.shap_values(lasts_.x, lasts_.blackbox, **shap_kwargs)
            df["shap_abs_" + name].append(
                explainer.explanation_error(divide_by_baseline=False))
        # Progress snapshot after each pattern variant.
        print(pd.DataFrame(df))
    return pd.DataFrame(df)
def get_predictions_synth():
    """Collect surrogate and black-box explanations for the synthetic runs.

    For every saved LASTS run of each synthetic pattern variant, gathers
    the surrogate's predicted explanation, the black-box's true
    explanation, and the associated time series; cumulative results are
    checkpointed to ``./stab_temp/`` after each variant.

    Returns:
        (dict_exp, dict_true, dict_tss) — defaultdicts of lists keyed by
        variant name ``"lasts_synth_<pattern>"``.
    """
    folders = ["pat", "pat0", "pat1", "pat2", "pat3"]
    lasts_lists = [
        load_multiple_lasts(
            "./saved/pat/" + folder + "/synth/matched_uniform_saxdt",
            simple_load=True)
        for folder in folders
    ]
    names = ["lasts_synth_" + folder for folder in folders]

    dict_exp = defaultdict(list)
    dict_true = defaultdict(list)
    dict_tss = defaultdict(list)
    for idx, (lasts_list, name) in enumerate(zip(lasts_lists, names)):
        print(idx + 1)
        for lasts_ in lasts_list:
            pred_importances, tss = lasts_.surrogate.predict_explanation(
                lasts_.z_tilde, 1)
            true_importances = lasts_.blackbox.predict_explanation(
                lasts_.z_tilde)
            dict_tss[name].append(tss)
            dict_true[name].append(true_importances)
            dict_exp[name].append(pred_importances)
        # Checkpoint the cumulative dicts so a crash loses at most one variant.
        dump(dict_exp, "./stab_temp/dict_exp_" + str(idx) + ".joblib")
        dump(dict_true, "./stab_temp/dict_true_" + str(idx) + ".joblib")
        dump(dict_tss, "./stab_temp/dict_tss_" + str(idx) + ".joblib")
    return dict_exp, dict_true, dict_tss
def get_usefulness_df():
    """Compute usefulness scores for real datasets and saved LASTS runs.

    Builds the four real-dataset explanation splits, loads the saved
    LASTS runs for each dataset/blackbox combination, and scores both
    with ``usefulness_scores_real`` / ``usefulness_scores_lasts``.

    Returns:
        dict mapping ``"global_<dataset>"`` and
        ``"lasts_<dataset>_<blackbox>"`` names to their score results.

    Bug fix: ``dataset_list`` previously listed ecg200 before coffee while
    ``global_names`` listed coffee before ecg200, so the two datasets'
    scores were stored under each other's keys. The list order now
    matches the names.
    """
    cbf_X_exp = build_cbf(n_samples=600, random_state=0, verbose=False)[10:]
    ecg200_X_exp = build_ecg200(verbose=False, random_state=0)[10:]
    coffee_X_exp = build_coffee(verbose=False, random_state=0)[10:]
    gunpoint_X_exp = build_gunpoint(verbose=False, random_state=0)[10:]

    lasts_cbf_knn = load_multiple_lasts(
        "./saved/knn/cbf/matched_uniform_saxdt", simple_load=True)
    lasts_cbf_resnet = load_multiple_lasts(
        "./saved/resnet/cbf/matched_uniform_saxdt", simple_load=True)

    lasts_coffee_knn = load_multiple_lasts(
        "./saved/knn/coffee/matched_uniform_saxdt", simple_load=True)
    lasts_coffee_resnet = load_multiple_lasts(
        "./saved/resnet/coffee/matched_uniform_saxdt", simple_load=True)

    lasts_ecg200_knn = load_multiple_lasts(
        "./saved/knn/ecg200/matched_uniform_saxdt", simple_load=True)
    lasts_ecg200_resnet = load_multiple_lasts(
        "./saved/resnet/ecg200/matched_uniform_saxdt", simple_load=True)

    lasts_gunpoint_knn = load_multiple_lasts(
        "./saved/knn/gunpoint/matched_uniform_saxdt", simple_load=True)
    lasts_gunpoint_resnet = load_multiple_lasts(
        "./saved/resnet/gunpoint/matched_uniform_saxdt", simple_load=True)

    # Order must match global_names: cbf, coffee, ecg200, gunpoint.
    dataset_list = [cbf_X_exp, coffee_X_exp, ecg200_X_exp, gunpoint_X_exp]
    lasts_lists = [
        lasts_cbf_knn, lasts_cbf_resnet, lasts_coffee_knn, lasts_coffee_resnet,
        lasts_ecg200_knn, lasts_ecg200_resnet, lasts_gunpoint_knn,
        lasts_gunpoint_resnet
    ]
    lasts_lists_names = [
        "lasts_cbf_knn", "lasts_cbf_resnet", "lasts_coffee_knn",
        "lasts_coffee_resnet", "lasts_ecg200_knn", "lasts_ecg200_resnet",
        "lasts_gunpoint_knn", "lasts_gunpoint_resnet"
    ]
    global_names = [
        "global_cbf", "global_coffee", "global_ecg200", "global_gunpoint"
    ]
    df_dict = dict()
    for dataset, name in zip(dataset_list, global_names):
        df_dict[name] = usefulness_scores_real(dataset[0], dataset[1])
    for lasts_list, name in zip(lasts_lists, lasts_lists_names):
        df_dict[name] = usefulness_scores_lasts(lasts_list)
    return df_dict
def get_predictions():
    """Collect surrogate explanations for each real dataset/blackbox pair.

    Loads the saved LASTS runs for every (dataset, blackbox) combination
    and gathers each run's surrogate-predicted explanation and time
    series; cumulative results are checkpointed to ``./stab_temp/`` after
    each combination.

    Returns:
        (dict_exp, dict_tss) — defaultdicts of lists keyed by
        ``"<dataset>_<blackbox>"``.

    Bug fix: ``lasts_ecg200_knn`` / ``"ecg200_knn"`` were listed twice in
    the parallel lists, so the ecg200_knn predictions were appended twice
    under the same key and the checkpoint file indices were skewed. The
    duplicates are removed.
    """
    datasets = ["cbf", "coffee", "ecg200", "gunpoint"]
    blackboxes = ["knn", "resnet"]

    lasts_lists = []
    names = []
    for dataset in datasets:
        for blackbox in blackboxes:
            lasts_lists.append(load_multiple_lasts(
                "./saved/" + blackbox + "/" + dataset
                + "/matched_uniform_saxdt",
                simple_load=True))
            names.append(dataset + "_" + blackbox)

    dict_exp = defaultdict(list)
    dict_tss = defaultdict(list)
    for i, (lasts_list, name) in enumerate(zip(lasts_lists, names)):
        print(i + 1)
        for lasts_ in lasts_list:
            pred_importances, tss = lasts_.surrogate.predict_explanation(
                lasts_.z_tilde, 1)
            dict_tss[name].append(tss)
            dict_exp[name].append(pred_importances)
        # Checkpoint cumulative dicts after each combination.
        dump(dict_exp, "./stab_temp/dict_exp_" + str(i) + ".joblib")
        dump(dict_tss, "./stab_temp/dict_tss_" + str(i) + ".joblib")
    return dict_exp, dict_tss
def get_instability_df(latent=False, compute_lasts=True, compute_shap=True):
    """Compute instability scores for every dataset/blackbox combination.

    For each combination, runs ``instability_multiple`` twice — once in
    the latent space and once in the manifest space — and stores both
    results. The growing result dict is checkpointed to
    ``./stability_saved/`` after each combination.

    Args:
        latent: unused; kept for backward compatibility. Both latent and
            manifest instability are always computed.
        compute_lasts: forwarded to ``instability_multiple``.
        compute_shap: forwarded to ``instability_multiple``.

    Returns:
        dict mapping ``"<name>_lasts_latent"`` / ``"<name>_lasts_manifest"``
        to the corresponding instability DataFrames.

    Bug fix: ``lasts_ecg200_knn`` / ``"ecg200_knn"`` appeared twice in the
    parallel lists, so that combination was computed twice and its result
    overwritten under the same keys. The duplicates are removed.
    """
    shap_kwargs = {
        "nsamples": 1000,
        "background": "linear_consecutive",
        "pen": 1,
        "model": "rbf",
        "jump": 5,
        "plot": False,
        "figsize": (20, 3),
        "segments_size": None
    }

    datasets = ["cbf", "coffee", "ecg200", "gunpoint"]
    blackboxes = ["knn", "resnet"]

    lasts_lists = []
    names = []
    for dataset in datasets:
        for blackbox in blackboxes:
            lasts_lists.append(load_multiple_lasts(
                "./saved/" + blackbox + "/" + dataset
                + "/matched_uniform_saxdt",
                simple_load=True))
            names.append(dataset + "_" + blackbox)

    df_dict = dict()
    for i, (lasts_list, name) in enumerate(zip(lasts_lists, names)):
        print(i + 1, "/", len(lasts_lists))
        df1 = instability_multiple(lasts_list=lasts_list,
                                   combination_name=name,
                                   shap_kwargs=shap_kwargs,
                                   latent=True,
                                   compute_lasts=compute_lasts,
                                   compute_shap=compute_shap)
        df2 = instability_multiple(lasts_list=lasts_list,
                                   combination_name=name,
                                   shap_kwargs=shap_kwargs,
                                   latent=False,
                                   compute_lasts=compute_lasts,
                                   compute_shap=compute_shap)
        df_dict[name + "_lasts_latent"] = df1
        df_dict[name + "_lasts_manifest"] = df2
        # Checkpoint the growing dict after each combination.
        dump(df_dict, "./stability_saved/df_dict_" + str(i) + ".joblib")
    return df_dict