Example 1
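The snippets below share a common set of imports that the scraped examples omit. A minimal preamble, assuming preprocessing, finetuning, and visualizations are project-local modules of the DomAdapt repository and that metrics refers to sklearn.metrics (inferred from the calls used):

import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics

# Project-local modules (assumed to be on the path):
import preprocessing
import finetuning
import visualizations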
def plot3f(years=[2001, 2002, 2003, 2004, 2005, 2006, 2007]):

    # Raw string avoids accidental escape sequences in the Windows path.
    data_dir = r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt"
    X, Y = preprocessing.get_splits(sites=['hyytiala'],
                                    years=[2008],
                                    datadir=os.path.join(data_dir, "data"),
                                    dataset="profound",
                                    simulations=None)
    
    predictions_test, errors = finetuning.featureExtractorC("mlp", 10, None, 50,
                                                            years=years)
    Y_preds = np.array(predictions_test)
    
    visualizations.plot_prediction(Y, Y_preds, "Hyytiälä (2008)")
    plt.legend(loc="upper right")
    
    mae = metrics.mean_absolute_error(Y, np.mean(Y_preds, 0))
    plt.text(10, 10, f"MAE = {np.round(mae, 4)}")
Example 2
def plot4(w, model, years=[2001, 2002, 2003, 2004, 2005, 2006, 2007]):
    
    def moving_average(x, w):
        return np.convolve(x, np.ones(w), 'valid') / w

    data_dir = "OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt"
    X, Y = preprocessing.get_splits(sites = ['hyytiala'],
                                years = [2008],
                                datadir = os.path.join(data_dir, "data"), 
                                dataset = "profound",
                                simulations = None)
    Y_preles = pd.read_csv(os.path.join(data_dir, r"data\profound\outputhyytiala2008calib"), sep=";")
    Y_nn = np.transpose(np.load(r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\mlp0\noPool\sigmoid\y_preds.npy", allow_pickle=True).squeeze(2))
    predictions_test, errors = finetuning.featureExtractorC("mlp", 10, None, 50,
                                                            years=years)
    Y_nn_f = np.transpose(np.array(predictions_test).squeeze(2))
    
    mt = moving_average(Y.squeeze(1), w)
    mp = moving_average(Y_preles.squeeze(1), w)
    mn = moving_average(np.mean(Y_nn, axis=1), w)
    mnf = moving_average(np.mean(Y_nn_f, axis=1), w)
    
    plt.figure(num=None, figsize=(7, 7), facecolor='w', edgecolor='k')
    plt.plot(mt, label="Groundtruth", color="lightgrey")
    if model=="preles":
        plt.plot(mp, label="PRELES \npredictions", color="green")
        maep = metrics.mean_absolute_error(mt, mp)
        plt.text(10,9, f"MAE = {np.round(maep, 4)}")
    elif model=="mlp0":
        plt.plot(mn, label="MLP \npredictions", color="green")
        maen = metrics.mean_absolute_error(mt, mn)
        plt.text(10,9, f"MAE = {np.round(maen, 4)}")
    elif model=="mlp10":
        plt.plot(mnf, label="Finetuned MLP \npredictions", color="green")
        maen = metrics.mean_absolute_error(mt, mnf)
        plt.text(10,9, f"MAE = {np.round(maen, 4)}")
    plt.xlabel("Day of Year")
    plt.ylabel("Average GPP over 7 days [g C m$^{-2}$ day$^{-1}$]")
    plt.legend()
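The moving_average helper relies on np.convolve in 'valid' mode, which keeps only the positions where the window fully overlaps the input, so the output has len(x) - w + 1 points. A quick self-contained check:

import numpy as np

def moving_average(x, w):
    # 'valid' mode: the window must fully overlap x
    return np.convolve(x, np.ones(w), 'valid') / w

x = np.arange(365, dtype=float)
print(moving_average(x, 7).shape)  # (359,)
print(moving_average(x, 7)[:3])    # [3. 4. 5.]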
Example 3
                [sp_df.loc[(sp_df.task =="sparse_finetuning") & (sp_df.finetuned_type == "C-OLS")]["rmse_val"]]]
    
    m = ['o', "*"]
    s = [60, 200]
    labs = ["selected", "OLS"]
    for i in range(len(xi)):
        if log:
            plt.scatter(np.log(xi[i]), np.log(yi[i]), alpha = 0.8, color = colors[i], marker=m[i], s = s[i], label=labs[i])
            plt.xlabel("Log(Mean Absolute Error)")
            plt.ylabel("Log(Root Mean Squared Error)")
        else:
            plt.scatter(xi[i], yi[i], alpha = 0.8, color = colors[i], marker=m[i], s = s[i], label=labs[i])
            plt.xlabel("Mean Absolute Error")
            plt.ylabel("Root Mean Squared Error")
        plt.legend(loc="lower right")
        plt.locator_params(axis='y', nbins=7)
        plt.locator_params(axis='x', nbins=7)    
#%%
plot1()
#%%
import finetuning
import numpy as np
import matplotlib.pyplot as plt

predictions_test, errors = finetuning.featureExtractorC("mlp", 10, None, 50, classifier="ols",
                                                        years=[2005, 2006], sparse=2)

preds = np.array(predictions_test).squeeze(2)
plt.plot(np.transpose(preds))

errors = np.mean(np.array(errors), axis=1)
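The layout of errors is not documented in these snippets; judging from how errors[0] through errors[3] are written into the rmse_train, rmse_val, mae_train, and mae_val columns in the later examples, it appears to be one row per metric and one column per repetition. A sketch of that assumed convention with dummy data:

import numpy as np

# Assumed layout: 4 metrics x 5 repetitions (the metric order is
# inferred from the result tables built in the later examples).
errors = np.random.rand(4, 5)
rmse_train, rmse_val, mae_train, mae_val = np.mean(errors, axis=1)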
Example 4
def plot5(model, w=None, years=[2001, 2002, 2003, 2004, 2005, 2006, 2007]):

    def moving_average(x, w):
        return np.convolve(x, np.ones(w), 'valid') / w

    data_dir = r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt"
    X, Y = preprocessing.get_splits(sites=['hyytiala'],
                                    years=[2008],
                                    datadir=os.path.join(data_dir, "data"),
                                    dataset="profound",
                                    simulations=None)

    Y_preles = pd.read_csv(os.path.join(data_dir, r"data\profound\outputhyytiala2008calib"), sep=";")
    Y_nn = np.transpose(np.load(r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\mlp0\noPool\sigmoid\y_preds.npy", allow_pickle=True).squeeze(2))
    predictions_test, errors = finetuning.featureExtractorC("mlp", 10, None, 50,
                                                            years=years)
    Y_nn_f = np.transpose(np.array(predictions_test).squeeze(2))
    
    if w is not None:
        Y = moving_average(Y.squeeze(1), w)
        Y_preles = moving_average(Y_preles.squeeze(1), w)
        Y_nn = moving_average(np.mean(Y_nn, axis=1), w)
        Y_nn_f = moving_average(np.mean(Y_nn_f, axis=1), w)
    else:
        Y = Y.squeeze(1)
        Y_preles = Y_preles.squeeze(1)
        Y_nn = np.mean(Y_nn, axis=1)
        Y_nn_f = np.mean(Y_nn_f, axis=1)

    plt.figure(num=None, figsize=(7, 7), facecolor='w', edgecolor='k')
    if model == "preles":
        plt.scatter(Y_preles, Y, color="darkblue")
        # Fit with polyfit
        b, m = polyfit(Y_preles, Y,  1)
        r2_p = rsquared(Y_preles, Y,  1)["determination"]
        plt.plot(Y_preles, b + m * Y_preles, '-', color="darkred", label = "y = a + b $\hat{y}$ ")
        maep = metrics.mean_absolute_error(Y, Y_preles)
        plt.text(0,10, f"MAE = {np.round(maep, 4)}")
        plt.text(0,9, f"R$^2$ = {np.round(r2_p, 4)}")
    elif model == "mlp0":
        plt.scatter(Y_nn, Y, color="darkblue")
        # Fit with polyfit
        b, m = polyfit(Y_nn, Y, 1)
        r2_nn = rsquared(Y_nn, Y,  1)["determination"]
        plt.plot(Y_nn, b + m *Y_nn, '-', color="darkred", label = "y = a + b $\hat{y}$ ")
        maen = metrics.mean_absolute_error(Y, Y_nn)
        plt.text(0,10, f"MAE = {np.round(maen, 4)}")
        plt.text(0,9, f"R$^2$ = {np.round(r2_nn, 4)}")
    elif model == "mlp10":
        plt.scatter(Y_nn_f, Y, color="darkblue")
        b, m = polyfit(Y_nn_f, Y, 1)
        r2_nnf = rsquared(Y_nn_f, Y,  1)["determination"]
        plt.plot(Y_nn_f, b + m * Y_nn_f, '-', color="darkred", label = "y = a + b $\hat{y}$ ")
        maenf = metrics.mean_absolute_error(Y, Y_nn_f)
        plt.text(0,10, f"MAE = {np.round(maenf, 4)}")
        plt.text(0,9, f"R$^2$ = {np.round(r2_nnf, 4)}")
    
    plt.plot(np.arange(11), np.arange(11), '--', color="gray", label=r"y = $\hat{y}$")
    plt.xlim((-1,11))
    plt.ylim((-1,11))
    plt.ylabel("True GPP Test [g C m$^{-2}$ day$^{-1}$]")
    plt.xlabel("Estimated GPP Test [g C m$^{-2}$ day$^{-1}$]")
    
    plt.legend(loc="lower right")
Example 5
def feature_extraction_results(types, simsfrac):

    domadapt_errors = []
    domadapt_predictions = {}
    running_losses = {}

    for typ in types:
        for frac in simsfrac:

            predictions, errors, Y_test = finetuning.featureExtractorA(
                "mlp", typ, None, frac)
            errors = np.mean(np.array(errors), 1)
            domadapt_errors.append([
                f"MLP{typ}D0{frac}FA", "mlp", typ, 5, frac, "A", 0, None,
                errors[0], errors[1], errors[2], errors[3], "finetuning"
            ])
            domadapt_predictions["A-None"] = predictions

            # 1) Ordinary Least Squares as Classifier
            predictions_ols, errors = finetuning.featureExtractorC(
                "mlp", typ, None, frac, "ols")
            errors = np.mean(np.array(errors), axis=1)
            domadapt_errors.append([
                f"MLP{typ}D0{frac}FC1", "mlp", typ, 5, frac, "C-OLS", 0, None,
                errors[0], errors[1], errors[2], errors[3], "finetuning"
            ])
            domadapt_predictions["C-OLS"] = predictions_ols
            #visualizations.plot_prediction(Y_test, predictions_ols, "OLS")

            # 2) Non-negative Least Squares as Classifier
            predictions_nnls, errors = finetuning.featureExtractorC(
                "mlp", typ, None, frac, "nnls")
            errors = np.mean(np.array(errors), axis=1)
            domadapt_errors.append([
                f"MLP{typ}D0{frac}FC2", "mlp", typ, 5, frac, "C-NNLS", 0, None,
                errors[0], errors[1], errors[2], errors[3], "finetuning"
            ])
            domadapt_predictions["C-NNLS"] = predictions_nnls
            #visualizations.plot_prediction(Y_test, predictions_nnls, "Non-negative least squares")

            # 3) MLP with architecture 2 as Classifier
            rl, errors, predictions_mlp2 = finetuning.featureExtractorD(
                "mlp", typ, 500, frac)
            errors = np.mean(np.array(errors), 0)
            domadapt_errors.append([
                f"MLP{typ}D0{frac}FD", "mlp", typ, 5, frac, "D-MLP2", 0, 500,
                errors[0], errors[1], errors[2], errors[3], "finetuning"
            ])
            running_losses["D-MLP2"] = rl
            domadapt_predictions["D-MLP2"] = predictions_mlp2

            ## Feature extractor B is run only on the cluster due to its computation time; ##
            ## its results are loaded from disk below. ##

            # 4) Full backprop with pretrained weights
            rets_fb = pd.read_csv(
                rf"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\mlp7\nodropout\sims_frac{frac}\tuned\setting0\selected_results.csv"
            )
            rl_fb = np.load(
                rf"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\mlp7\nodropout\sims_frac{frac}\tuned\setting0\running_losses.npy",
                allow_pickle=True)
            domadapt_errors.append([
                f"MLP{typ}D0{frac}FB1", "mlp", typ, 5, frac, "B-fb", 0,
                rets_fb["epochs"][0], rets_fb["rmse_train"][0],
                rets_fb["rmse_val"][0], rets_fb["mae_train"][0],
                rets_fb["mae_val"][0], "finetuning"
            ])
            running_losses["B-full_backprop"] = rl_fb

            # 5) Backprop only through the last layer
            rets_hb = pd.read_csv(
                rf"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\mlp7\nodropout\sims_frac{frac}\tuned\setting1\selected_results.csv"
            )
            rl_fw2 = np.load(
                rf"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\mlp7\nodropout\sims_frac{frac}\tuned\setting1\running_losses.npy",
                allow_pickle=True)
            domadapt_errors.append([
                f"MLP{typ}D0{frac}FB2", "mlp", typ, 5, frac, "B-fW2", 0,
                rets_hb["epochs"][0], rets_hb["rmse_train"][0],
                rets_hb["rmse_val"][0], rets_hb["mae_train"][0],
                rets_hb["mae_val"][0], "finetuning"
            ])
            running_losses["B-freezeW2"] = rl_fw2

    domadapt_results = pd.DataFrame(domadapt_errors,
                                    columns=[
                                        "id", "model", "typ", "architecture",
                                        "simsfrac", "finetuned_type",
                                        "dropout", "epochs", "rmse_train",
                                        "rmse_val", "mae_train", "mae_val",
                                        "task"
                                    ])

    domadapt_results.to_excel(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\results\featureextraction.xlsx"
    )
    domadapt_results.to_csv(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\results\tables\featureextraction.csv"
    )

    return (domadapt_results, running_losses, domadapt_predictions)
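A hypothetical call; the model types and simulation fractions here are illustrative, not taken from the original run:

results, losses, predictions = feature_extraction_results(types=[7, 10], simsfrac=[30, 50])
print(results[["id", "finetuned_type", "mae_val"]])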
Example 6
def sparse_networks_results(sparses):

    df_sel = pd.DataFrame(columns=[
        "id", "model", "typ", "simsfrac", "finetuned_type", "dropout",
        "epochs", "rmse_train", "rmse_val", "mae_train", "mae_val", "task"
    ])

    for sparse in sparses:
        l = visualizations.losses("mlp",
                                  0,
                                  f"sparse{sparse}",
                                  sparse=True,
                                  plot=False)
        df_sel = df_sel.append(
            {
                "id": f"MLP0{sparse}1D0",
                "model": "mlp",
                "typ": 0,
                "simsfrac": None,
                "finetuned_type": None,
                "dropout": 0,
                "epochs": 1000,
                "rmse_train": l["rmse_train"][0],
                "rmse_val": l["rmse_val"][0],
                "mae_train": l["mae_val"][0],
                "mae_val": l["mae_val"][0],
                "task": "sparse_selected"
            },
            ignore_index=True)

    settings = ["B-fb", "B-fW2"]
    epochs = [5000, 40000]
    for i in range(len(settings)):
        for typ in [6, 7, 8]:
            for sparse in sparses:
                l = visualizations.losses("mlp",
                                          typ,
                                          f"sparse1\setting{i}",
                                          sparse=True,
                                          plot=False)
                df_sel = df_sel.append(
                    {
                        "id": f"MLP0S{sparse}D0",
                        "model": "mlp",
                        "typ": typ,
                        "simsfrac": 30,
                        "finetuned_type": settings[i],
                        "dropout": 0,
                        "epochs": epochs[i],
                        "rmse_train": l["rmse_train"][0],
                        "rmse_val": l["rmse_val"][0],
                        "mae_train": l["mae_train"][0],
                        "mae_val": l["mae_val"][0],
                        "task": "sparse_finetuning"
                    },
                    ignore_index=True)

    years_list = [[2006]]
    # Alternative year subsets: [2005, 2006], [2004, 2005, 2006],
    # [2003, 2004, 2005, 2006], [2001, 2003, 2004, 2005, 2006]
    for typ in [5, 6, 7, 8, 9, 10]:
        for i in range(len(years_list)):
            predictions_test, errors = finetuning.featureExtractorC(
                "mlp",
                typ,
                None,
                30,
                classifier="ols",
                years=years_list[i],
                sparse=1)
            errors = np.mean(np.array(errors), axis=1)
            df_sel = df_sel.append(
                {
                    "id": f"MLP0S{i+1}D0",
                    "model": "mlp",
                    "typ": typ,
                    "simsfrac": 30,
                    "finetuned_type": "C-OLS",
                    "dropout": 0,
                    "epochs": None,
                    "rmse_train": errors[0],
                    "rmse_val": errors[1],
                    "mae_train": errors[2],
                    "mae_val": errors[3],
                    "task": "sparse_finetuning"
                },
                ignore_index=True)

    for typ in [5, 6, 7, 8, 9, 10]:
        predictions_test, errors = finetuning.featureExtractorC(
            "mlp",
            typ,
            None,
            30,
            classifier="ols",
            years=[2006],
            random_days=30)
        errors = np.mean(np.array(errors), axis=1)
        df_sel = df_sel.append(
            {
                "id": f"MLP0S0D0",
                "model": "mlp",
                "typ": typ,
                "simsfrac": 30,
                "finetuned_type": "C-OLS",
                "dropout": 0,
                "epochs": None,
                "rmse_train": errors[0],
                "rmse_val": errors[1],
                "mae_train": errors[2],
                "mae_val": errors[3],
                "task": "sparse_finetuning"
            },
            ignore_index=True)

    df_sel.to_excel(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\results\sparsenetworks.xlsx"
    )
    df_sel.to_csv(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\results\tables\sparsenetworks.csv"
    )

    return (df_sel)
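Note that DataFrame.append, used throughout these result builders, was deprecated in pandas 1.4 and removed in 2.0. On current pandas the same pattern can collect plain dicts and build the frame once, as feature_extraction_results above already does; a minimal sketch with illustrative values:

import pandas as pd

# Collect rows as plain dicts, then construct the DataFrame in one step.
rows = []
for sparse in [1, 2, 3]:  # illustrative values
    rows.append({"id": f"MLP0{sparse}1D0", "model": "mlp", "typ": 0})
df_sel = pd.DataFrame(rows)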
Example 7
def selected_networks_results(simsfrac):

    df_sel = pd.DataFrame(columns=[
        "id", "model", "typ", "simsfrac", "finetuned_type", "dropout",
        "epochs", "rmse_train", "rmse_val", "mae_train", "mae_val", "task"
    ])

    l = visualizations.losses("mlp", 0, r"relu", plot=False)
    df_sel = df_sel.append(
        {
            "id": "MLP0base",
            "model": "mlp",
            "typ": 0,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": 0,
            "epochs": 5000,
            "rmse_train": l["rmse_train"][0],
            "rmse_val": l["rmse_val"][0],
            "mae_train": l["mae_train"][0],
            "mae_val": l["mae_val"][0],
            "task": "selected"
        },
        ignore_index=True)

    l = visualizations.losses("mlp", 5, r"relu", plot=False)
    df_sel = df_sel.append(
        {
            "id": "MLP5base",
            "model": "mlp",
            "typ": 5,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": 0,
            "epochs": 5000,
            "rmse_train": l["rmse_train"][0],
            "rmse_val": l["rmse_val"][0],
            "mae_train": l["mae_train"][0],
            "mae_val": l["mae_val"][0],
            "task": "selected"
        },
        ignore_index=True)

    l = visualizations.losses("mlp", 4, r"relu", plot=False)
    df_sel = df_sel.append(
        {
            "id": "MLP4base",
            "model": "mlp",
            "typ": 4,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": 0,
            "epochs": 5000,
            "rmse_train": l["rmse_train"][0],
            "rmse_val": l["rmse_val"][0],
            "mae_train": l["mae_train"][0],
            "mae_val": l["mae_val"][0],
            "task": "selected"
        },
        ignore_index=True)

    l = visualizations.losses("cnn", 0, r"", plot=False)
    df_sel = df_sel.append(
        {
            "id": "CNN0base",
            "model": "cnn",
            "typ": 0,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": 0,
            "epochs": 10000,
            "rmse_train": l["rmse_train"][0],
            "rmse_val": l["rmse_val"][0],
            "mae_train": l["mae_train"][0],
            "mae_val": l["mae_val"][0],
            "task": "selected"
        },
        ignore_index=True)

    l = visualizations.losses("lstm", 0, r"", plot=False)
    df_sel = df_sel.append(
        {
            "id": "LSTM0base",
            "model": "lstm",
            "typ": 0,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": 0,
            "epochs": 10000,
            "rmse_train": l["rmse_train"][0],
            "rmse_val": l["rmse_val"][0],
            "mae_train": l["mae_train"][0],
            "mae_val": l["mae_val"][0],
            "task": "selected"
        },
        ignore_index=True)

    l = np.load(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\python\outputs\models\rf0\errors.npy"
    )
    df_sel = df_sel.append(
        {
            "id": "RF0base",
            "model": "rf",
            "typ": 0,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": None,
            "epochs": None,
            "rmse_train": l[0],
            "rmse_val": l[1],
            "mae_train": l[2],
            "mae_val": l[3],
            "task": "selected"
        },
        ignore_index=True)

    epochs = [5000, 10000, 20000, 30000]
    for i in range(len(simsfrac)):
        l = visualizations.losses("mlp",
                                  5,
                                  f"nodropout\dummies\sims_frac{simsfrac[i]}",
                                  plot=False)
        df_sel = df_sel.append(
            {
                "id": f"MLP5DD0{simsfrac[i]}P0",
                "model": "mlp",
                "typ": 5,
                "simsfrac": simsfrac[i],
                "finetuned_type": None,
                "dropout": 0.0,
                "epochs": epochs[i],
                "rmse_train": l["rmse_train"][0],
                "rmse_val": l["rmse_val"][0],
                "mae_train": l["mae_train"][0],
                "mae_val": l["mae_val"][0],
                "task": "pretraining"
            },
            ignore_index=True)
    epochs = [5000, 5000, 5000, 5000]
    for i in range(len(simsfrac)):
        l = visualizations.losses(
            "mlp",
            5,
            rf"nodropout\dummies\sims_frac{simsfrac[i]}\tuned\setting0",
            plot=False)
        df_sel = df_sel.append(
            {
                "id": f"MLP5DD0{simsfrac[i]}P0",
                "model": "mlp",
                "typ": 5,
                "simsfrac": simsfrac[i],
                "finetuned_type": "full",
                "dropout": 0.0,
                "epochs": epochs[i],
                "rmse_train": l["rmse_train"][0],
                "rmse_val": l["rmse_val"][0],
                "mae_train": l["mae_train"][0],
                "mae_val": l["mae_val"][0],
                "task": "finetuning"
            },
            ignore_index=True)

    epochs = [20000, 20000, 20000, 20000]
    for i in range(len(simsfrac)):
        l = visualizations.losses(
            "mlp",
            5,
            rf"nodropout\dummies\sims_frac{simsfrac[i]}\tuned\setting1\freeze2",
            plot=False)
        df_sel = df_sel.append(
            {
                "id": f"MLP5DD0{simsfrac[i]}P0",
                "model": "mlp",
                "typ": 5,
                "simsfrac": simsfrac[i],
                "finetuned_type": "partial",
                "dropout": 0.0,
                "epochs": epochs[i],
                "rmse_train": l["rmse_train"][0],
                "rmse_val": l["rmse_val"][0],
                "mae_train": l["mae_train"][0],
                "mae_val": l["mae_val"][0],
                "task": "finetuning"
            },
            ignore_index=True)

    for typ in [7, 10, 12, 13, 14]:
        epochs = [5000, 10000, 10000, 20000]
        for i in range(len(simsfrac)):
            l = visualizations.losses("mlp",
                                      typ,
                                      f"nodropout\sims_frac{simsfrac[i]}",
                                      plot=False)
            df_sel = df_sel.append(
                {
                    "id": f"MLP{typ}DD0{simsfrac[i]}P0",
                    "model": "mlp",
                    "typ": typ,
                    "simsfrac": simsfrac[i],
                    "finetuned_type": None,
                    "dropout": 0.0,
                    "epochs": epochs[i],
                    "rmse_train": l["rmse_train"][0],
                    "rmse_val": l["rmse_val"][0],
                    "mae_train": l["mae_train"][0],
                    "mae_val": l["mae_val"][0],
                    "task": "finetuning"
                },
                ignore_index=True)

    for typ in [7, 10, 12, 13, 14]:
        epochs = [5000, 5000, 5000, 5000]
        for i in range(len(simsfrac)):
            l = visualizations.losses(
                "mlp",
                typ,
                rf"nodropout\sims_frac{simsfrac[i]}\tuned\setting0",
                plot=False)
            df_sel = df_sel.append(
                {
                    "id": f"MLP{typ}DD0{simsfrac[i]}P0",
                    "model": "mlp",
                    "typ": typ,
                    "simsfrac": simsfrac[i],
                    "finetuned_type": "full",
                    "dropout": 0.0,
                    "epochs": epochs[i],
                    "rmse_train": l["rmse_train"][0],
                    "rmse_val": l["rmse_val"][0],
                    "mae_train": l["mae_train"][0],
                    "mae_val": l["mae_val"][0],
                    "task": "finetuning"
                },
                ignore_index=True)

    for typ in [7, 10, 12, 13, 14]:
        epochs = [5000, 5000, 5000, 5000]
        for i in range(len(simsfrac)):
            l = visualizations.losses(
                "mlp",
                typ,
                rf"nodropout\sims_frac{simsfrac[i]}\tuned\setting1",
                plot=False)
            df_sel = df_sel.append(
                {
                    "id": f"MLP{typ}DD0{simsfrac[i]}P0",
                    "model": "mlp",
                    "typ": typ,
                    "simsfrac": simsfrac[i],
                    "finetuned_type": "partial",
                    "dropout": 0.0,
                    "epochs": epochs[i],
                    "rmse_train": l["rmse_train"][0],
                    "rmse_val": l["rmse_val"][0],
                    "mae_train": l["mae_train"][0],
                    "mae_val": l["mae_val"][0],
                    "task": "finetuning"
                },
                ignore_index=True)

    pre = preles_errors("hyytiala")
    df_sel = df_sel.append(
        {
            "id": "preles2008hy",
            "model": "preles",
            "typ": 0,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": None,
            "epochs": None,
            "rmse_train": pre[0],
            "rmse_val": pre[1],
            "mae_train": pre[2],
            "mae_val": pre[3],
            "task": "processmodel"
        },
        ignore_index=True)

    pre = preles_errors("bily_kriz")
    df_sel = df_sel.append(
        {
            "id": "preles2008bk",
            "model": "preles",
            "typ": 2,
            "simsfrac": None,
            "finetuned_type": None,
            "dropout": None,
            "epochs": None,
            "rmse_train": pre[0],
            "rmse_val": pre[1],
            "mae_train": pre[2],
            "mae_val": pre[3],
            "task": "processmodel"
        },
        ignore_index=True)

    types = [7, 10, 12]
    for typ in types:
        for frac in simsfrac:
            predictions, errors = finetuning.featureExtractorA("mlp",
                                                               typ,
                                                               None,
                                                               frac,
                                                               dummies=False)
            errors = np.mean(np.array(errors), 1)
            df_sel = df_sel.append(
                {
                    "id": f"MLP{typ}D0{frac}A",
                    "model": "mlp",
                    "typ": typ,
                    "simsfrac": frac,
                    "finetuned_type": "A",
                    "dropout": 0,
                    "epochs": None,
                    "rmse_train": errors[0],
                    "rmse_val": errors[1],
                    "mae_train": errors[2],
                    "mae_val": errors[3],
                    "task": "pretraining_A"
                },
                ignore_index=True)

    types = [7, 10, 12]
    frac = 100
    for typ in types:
        predictions, errors = finetuning.featureExtractorC("mlp",
                                                           typ,
                                                           None,
                                                           frac,
                                                           dummies=False)
        errors = np.mean(np.array(errors), 1)
        df_sel = df_sel.append(
            {
                "id": f"MLP{typ}D0{frac}A",
                "model": "mlp",
                "typ": typ,
                "simsfrac": frac,
                "finetuned_type": "OLS",
                "dropout": 0,
                "epochs": None,
                "rmse_train": errors[0],
                "rmse_val": errors[1],
                "mae_train": errors[2],
                "mae_val": errors[3],
                "task": "finetuning"
            },
            ignore_index=True)

    types = [5, 13, 14]
    for typ in types:
        for frac in simsfrac:
            predictions, errors = finetuning.featureExtractorA("mlp",
                                                               typ,
                                                               None,
                                                               frac,
                                                               dummies=True)
            errors = np.mean(np.array(errors), 1)
            df_sel = df_sel.append(
                {
                    "id": f"MLP{typ}D0{frac}A",
                    "model": "mlp",
                    "typ": typ,
                    "simsfrac": frac,
                    "finetuned_type": "A",
                    "dropout": 0,
                    "epochs": None,
                    "rmse_train": errors[0],
                    "rmse_val": errors[1],
                    "mae_train": errors[2],
                    "mae_val": errors[3],
                    "task": "pretraining_A"
                },
                ignore_index=True)

    types = [5, 13, 14]  # typ 13 produces overly large outliers
    frac = 100
    for typ in types:
        predictions, errors = finetuning.featureExtractorC("mlp",
                                                           typ,
                                                           None,
                                                           frac,
                                                           dummies=True)
        errors = np.mean(np.array(errors), 1)
        df_sel = df_sel.append(
            {
                "id": f"MLP{typ}D0{frac}A",
                "model": "mlp",
                "typ": typ,
                "simsfrac": frac,
                "finetuned_type": "OLS",
                "dropout": 0,
                "epochs": None,
                "rmse_train": errors[0],
                "rmse_val": errors[1],
                "mae_train": errors[2],
                "mae_val": errors[3],
                "task": "finetuning"
            },
            ignore_index=True)

    #preds_er = borealsites_predictions()["mlp_prediction_errors"]
    #df_sel = df_sel.append({"id":"mlp0nP2D0Rbs",
    #                                "model":"mlp",
    #                                "typ":0,
    #                                "simsfrac":None,
    #                                "finetuned_type":None,
    #                                "dropout":None,
    #                                "epochs":None,
    #                                "rmse_train":None,
    #                                "rmse_val":preds_er[0],
    #                                "mae_train":None,
    #                                "mae_val":preds_er[1],
    #                                "task":"borealsitesprediction"}, ignore_index=True)

    #preds_er = borealsites_predictions()["preles_prediction_errors"]
    #df_sel = df_sel.append({"id":"prelesbs",
    #                                "model":"preles",
    #                                "typ":3,
    #                                "simsfrac":None,
    #                                "finetuned_type":None,
    #                                "dropout":None,
    #                                "epochs":None,
    #                                "rmse_train":None,
    #                                "rmse_val":preds_er[0],
    #                                "mae_train":None,
    #                                "mae_val":preds_er[1],
    #                                "task":"borealsitesprediction"}, ignore_index=True)

    df_sel.to_excel(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\results\selectednetworks_final.xlsx"
    )
    df_sel.to_csv(
        r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt\results\tables\selectednetworks_final.csv"
    )

    return (df_sel)
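A hypothetical call; the simulation fractions follow the four-entry epochs lists used inside the function, but the exact values are an assumption:

df = selected_networks_results(simsfrac=[30, 50, 70, 100])
print(df.groupby("task")["mae_val"].mean())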