Example #1
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn import metrics
from sklearn.model_selection import KFold

import models  # project-local module: MLP, MLPmod
import utils   # project-local helpers: create_batches, rmse, minmax_scaler


def train_model_CV(hparams,
                   model_design,
                   X,
                   Y,
                   eval_set,
                   dropout_prob,
                   dropout,
                   data_dir,
                   save,
                   splits=5):
    """
    Train an MLP with k-fold cross-validation.

    Tracks RMSE and MAE per fold and epoch, evaluating either on the
    held-out fold or, if eval_set is given, on a fixed test set.
    Returns (running_losses, performance, y_tests, y_preds).
    """
    epochs = hparams["epochs"]
    featuresize = model_design["featuresize"]

    kf = KFold(n_splits=splits, shuffle=False)
    kf.get_n_splits(X)

    rmse_train = np.zeros((splits, epochs))
    rmse_val = np.zeros((splits, epochs))
    mae_train = np.zeros((splits, epochs))
    mae_val = np.zeros((splits, epochs))

    # z-score data
    #X_mean, X_std = np.mean(X), np.std(X)
    #X = utils.minmax_scaler(X)

    if eval_set is not None:
        print("Test set used for model evaluation")
        Xt_test = eval_set["X_test"]
        yt_test = eval_set["Y_test"]
        #Xt_test= utils.minmax_scaler(Xt_test, scaling = [X_mean, X_std])
        yt_test = torch.tensor(yt_test).type(dtype=torch.float)
        Xt_test = torch.tensor(Xt_test).type(dtype=torch.float)
        #yt_tests = []

    performance = []
    y_tests = []
    y_preds = []

    for i, (train_index, test_index) in enumerate(kf.split(X)):

        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = Y[train_index], Y[test_index]

        X_test = torch.tensor(X_test).type(dtype=torch.float)
        y_test = torch.tensor(y_test).type(dtype=torch.float)
        X_train = torch.tensor(X_train).type(dtype=torch.float)
        y_train = torch.tensor(y_train).type(dtype=torch.float)

        if featuresize is None:
            model = models.MLP(model_design["dimensions"],
                               model_design["activation"])
        else:
            model = models.MLPmod(featuresize, model_design["dimensions"],
                                  model_design["activation"], dropout_prob,
                                  dropout)

        optimizer = optim.Adam(model.parameters(), lr=hparams["learningrate"])
        criterion = nn.MSELoss()

        for epoch in range(epochs):

            # Training
            model.train()

            x, y = utils.create_batches(X_train, y_train, hparams["batchsize"],
                                        hparams["history"])

            x = torch.tensor(x).type(dtype=torch.float)
            y = torch.tensor(y).type(dtype=torch.float)

            output = model(x)

            # Compute training loss
            loss = criterion(output, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Evaluate the current model on the test set
            model.eval()

            with torch.no_grad():
                pred_train = model(X_train)
                if eval_set is None:
                    pred_test = model(X_test)
                    rmse_train[i, epoch] = utils.rmse(y_train, pred_train)
                    rmse_val[i, epoch] = utils.rmse(y_test, pred_test)
                    mae_train[i, epoch] = metrics.mean_absolute_error(
                        y_train, pred_train)
                    mae_val[i, epoch] = metrics.mean_absolute_error(
                        y_test, pred_test)
                else:
                    pred_test = model(Xt_test)
                    rmse_train[i, epoch] = utils.rmse(y_train, pred_train)
                    rmse_val[i, epoch] = utils.rmse(yt_test, pred_test)
                    mae_train[i, epoch] = metrics.mean_absolute_error(
                        y_train, pred_train)
                    mae_val[i, epoch] = metrics.mean_absolute_error(
                        yt_test, pred_test)

            if save and epoch % 1000 == 0:
                print("Epoch", epoch, ": Saving model to path.")
                torch.save(model.state_dict(),
                           os.path.join(data_dir, f"model{i}.pth"))

        # Predict with fitted model
        with torch.no_grad():
            preds_train = model(X_train)
            if eval_set is None:
                preds_test = model(X_test)
                performance.append([
                    utils.rmse(y_train, preds_train),
                    utils.rmse(y_test, preds_test),
                    metrics.mean_absolute_error(y_train, preds_train.numpy()),
                    metrics.mean_absolute_error(y_test, preds_test.numpy())
                ])
            else:
                preds_test = model(Xt_test)
                performance.append([
                    utils.rmse(y_train, preds_train),
                    utils.rmse(yt_test, preds_test),
                    metrics.mean_absolute_error(y_train, preds_train.numpy()),
                    metrics.mean_absolute_error(yt_test, preds_test.numpy())
                ])

        if eval_set is None:
            y_tests.append(y_test.numpy())
        else:
            y_tests.append(yt_test.numpy())

        y_preds.append(preds_test.numpy())

    running_losses = {
        "rmse_train": rmse_train,
        "mae_train": mae_train,
        "rmse_val": rmse_val,
        "mae_val": mae_val
    }

    return (running_losses, performance, y_tests, y_preds)
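
A minimal usage sketch (not from the source): the dictionary keys mirror what train_model_CV reads above, while the values, shapes, and paths are placeholders.

# Hypothetical invocation; hparams/model_design keys taken from the function body above.
hparams = {"epochs": 500, "learningrate": 1e-3,
           "batchsize": 128, "history": 0}
model_design = {"featuresize": None,           # None selects the plain models.MLP branch
                "dimensions": [7, 64, 32, 1],  # input -> hidden -> hidden -> output widths
                "activation": nn.ReLU}

running_losses, performance, y_tests, y_preds = train_model_CV(
    hparams, model_design, X, Y,
    eval_set=None,                 # None -> evaluate on each held-out fold
    dropout_prob=0.0, dropout=0,
    data_dir="models", save=False)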
Example #2
def training_CV(hparams,
                model_design,
                X,
                Y,
                feature_extraction,
                eval_set,
                featuresize,
                data_dir,
                save,
                splits=5):
    """
    Fine-tune a (pre-trained) MLP with k-fold cross-validation.

    If model_design is a dict, a models.MLPmod is rebuilt and its saved
    per-fold state is loaded from data_dir; otherwise model_design is
    used directly as the model. Parameters not listed in
    feature_extraction are frozen before training.
    Returns (running_losses, performance, y_tests, y_preds).
    """

    epochs = hparams["epochs"]

    kf = KFold(n_splits=splits, shuffle=False)
    kf.get_n_splits(X)

    rmse_train = np.zeros((splits, epochs))
    rmse_val = np.zeros((splits, epochs))
    mae_train = np.zeros((splits, epochs))
    mae_val = np.zeros((splits, epochs))

    # z-score data
    #X_mean, X_std = np.mean(X), np.std(X)
    #X = utils.minmax_scaler(X)

    if eval_set is not None:
        print("Test set used for model evaluation")
        Xt_test = eval_set["X_test"]
        #Xt_test= utils.minmax_scaler(Xt_test, scaling = [X_mean, X_std])
        yt_test = eval_set["Y_test"]
        yt_test = torch.tensor(yt_test).type(dtype=torch.float)
        Xt_test = torch.tensor(Xt_test).type(dtype=torch.float)
        yt_tests = []

    performance = []
    y_tests = []
    y_preds = []

    for i, (train_index, test_index) in enumerate(kf.split(X)):

        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = Y[train_index], Y[test_index]

        X_test = torch.tensor(X_test).type(dtype=torch.float)
        y_test = torch.tensor(y_test).type(dtype=torch.float)
        X_train = torch.tensor(X_train).type(dtype=torch.float)
        y_train = torch.tensor(y_train).type(dtype=torch.float)

        if isinstance(model_design, dict):
            print("Loading pretrained Model.")
            model = models.MLPmod(featuresize, model_design["dimensions"],
                                  model_design["activation"])
            model.load_state_dict(
                torch.load(os.path.join(data_dir, f"model{i}.pth")))
        else:
            model = model_design
        model.eval()

        if feature_extraction is not None:
            print("Freezing all weights except the feature extraction layers.")
            for child in model.children():
                for name, parameter in child.named_parameters():
                    if name not in feature_extraction:
                        parameter.requires_grad = False

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=hparams["learningrate"])

        for epoch in range(epochs):

            # Training
            model.train()

            x, y = utils.create_batches(X_train, y_train, hparams["batchsize"],
                                        hparams["history"])

            x = torch.tensor(x).type(dtype=torch.float)
            y = torch.tensor(y).type(dtype=torch.float)

            output = model(x)

            # Compute training loss
            loss = criterion(output, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Evaluate the current model on the test set
            model.eval()

            with torch.no_grad():
                pred_train = model(X_train)
                if eval_set is None:
                    pred_test = model(X_test)
                    rmse_train[i, epoch] = utils.rmse(y_train, pred_train)
                    rmse_val[i, epoch] = utils.rmse(y_test, pred_test)
                    mae_train[i, epoch] = metrics.mean_absolute_error(
                        y_train, pred_train)
                    mae_val[i, epoch] = metrics.mean_absolute_error(
                        y_test, pred_test)
                else:
                    pred_test = model(Xt_test)
                    rmse_train[i, epoch] = utils.rmse(y_train, pred_train)
                    rmse_val[i, epoch] = utils.rmse(yt_test, pred_test)
                    mae_train[i, epoch] = metrics.mean_absolute_error(
                        y_train, pred_train)
                    mae_val[i, epoch] = metrics.mean_absolute_error(
                        yt_test, pred_test)

        # Predict with fitted model
        with torch.no_grad():
            preds_train = model(X_train)
            if eval_set is None:
                preds_test = model(X_test)
                performance.append([
                    utils.rmse(y_train, preds_train),
                    utils.rmse(y_test, preds_test),
                    metrics.mean_absolute_error(y_train, preds_train.numpy()),
                    metrics.mean_absolute_error(y_test, preds_test.numpy())
                ])
            else:
                preds_test = model(Xt_test)
                performance.append([
                    utils.rmse(y_train, preds_train),
                    utils.rmse(yt_test, preds_test),
                    metrics.mean_absolute_error(y_train, preds_train.numpy()),
                    metrics.mean_absolute_error(yt_test, preds_test.numpy())
                ])

        if save:
            if feature_extraction is not None:
                torch.save(
                    model.state_dict(),
                    os.path.join(data_dir, "tuned", "setting1",
                                 f"model{i}.pth"))
            else:
                torch.save(
                    model.state_dict(),
                    os.path.join(data_dir, "tuned", "setting0",
                                 f"model{i}.pth"))

        if eval_set is None:
            y_tests.append(y_test.numpy())
        else:
            yt_tests.append(yt_test.numpy())
        y_preds.append(preds_test.numpy())

    running_losses = {
        "rmse_train": rmse_train,
        "mae_train": mae_train,
        "rmse_val": rmse_val,
        "mae_val": mae_val
    }

    if eval_set is None:
        return (running_losses, performance, y_tests, y_preds)
    else:
        return (running_losses, performance, yt_tests, y_preds)
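
A fine-tuning sketch (not from the source): passing a dict as model_design makes training_CV reload the per-fold checkpoints saved by train_model_CV; the parameter names in feature_extraction are hypothetical and depend on how models.MLPmod names its submodules.

hparams = {"epochs": 200, "learningrate": 1e-4,
           "batchsize": 128, "history": 0}
model_design = {"dimensions": [7, 64, 32, 1], "activation": nn.ReLU}

running_losses, performance, y_tests, y_preds = training_CV(
    hparams, model_design, X, Y,
    feature_extraction=["hidden3.weight", "hidden3.bias"],  # hypothetical names; only these stay trainable
    eval_set=None, featuresize=7,
    data_dir="models", save=False)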
Example #3
    datadir=os.path.join(datadir, "data"),
    dataset="profound",
    simulations=None,
    colnames=["PAR", "TAir", "VPD", "Precip", "fAPAR", "DOY_sin", "DOY_cos"],
    to_numpy=True)

#%% Train

X = utils.minmax_scaler(X)
X = torch.tensor(X).type(dtype=torch.float)
Y = torch.tensor(Y).type(dtype=torch.float)

#model = models.MLP([X.shape[1],12,1], nn.ReLU)
#model = models.LSTM(X.shape[1], 12, 1, 10, F.relu)

x, target = utils.create_batches(X, Y, 128, 0)
#x_test, target_test = utils.create_batches(X, Y, 128, 0)

x = torch.tensor(x).type(dtype=torch.float)
target = torch.tensor(target).type(dtype=torch.float)
#x_test = torch.tensor(x_test).type(dtype=torch.float)
#target_test = torch.tensor(target_test).type(dtype=torch.float)
#%%
hiddensize = [16, 32, 64, 128, 256]
dimensions = [X.shape[1]]
for _ in range(2):
    # randomly pick a hidden width for each of the two hidden layers
    dimensions.append(random.choice(hiddensize))
dimensions.append(Y.shape[1])

model = models.MLP(dimensions, nn.ReLU)
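
As a quick smoke test of the sampled architecture, a single optimization step in the style of the CV training loops above (the learning rate here is arbitrary):

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

output = model(x)                 # x, target come from utils.create_batches above
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"one-step training MSE: {loss.item():.4f}")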
Example #4
def train_model(
        hparams_add,
        model_design_add,
        X,
        Y,
        X_test,
        Y_test,
        i,
        # raw string: the original literal contained invalid escape sequences
        data_dir=r"OneDrive\Dokumente\Sc_Master\Masterthesis\Project\DomAdapt"):

    epochs = hparams_add["epochs"]

    rmse_train = np.zeros(epochs)
    rmse_val = np.zeros(epochs)
    mae_train = np.zeros(epochs)
    mae_val = np.zeros(epochs)

    # Standardize X and X_test together!!
    #mu = np.concatenate((X, X_test), 0).mean()
    #sigma = np.concatenate((X, X_test), 0).std()
    #X = utils.minmax_scaler(X, [mu, sigma])
    #X_test = utils.minmax_scaler(X_test, [mu, sigma])

    X_test = torch.tensor(X_test).type(dtype=torch.float)
    y_test = torch.tensor(Y_test).type(dtype=torch.float)
    X_train = torch.tensor(X).type(dtype=torch.float)
    y_train = torch.tensor(Y).type(dtype=torch.float)

    model_design_add["dimensions"].insert(0, X.shape[1])

    model = models.MLP(model_design_add["dimensions"],
                       model_design_add["activation"])
    optimizer = optim.Adam(model.parameters(), lr=hparams_add["learningrate"])
    criterion = nn.MSELoss()

    for epoch in range(epochs):

        # Training
        model.train()

        x, y = utils.create_batches(X_train, y_train, hparams_add["batchsize"],
                                    hparams_add["history"])

        x = torch.tensor(x).type(dtype=torch.float)
        y = torch.tensor(y).type(dtype=torch.float)

        output = model(x)

        # Compute training loss
        loss = criterion(output, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Evaluate the current model on the test set
        model.eval()

        with torch.no_grad():
            pred_train = model(X_train)
            pred_test = model(X_test)
            rmse_train[epoch] = utils.rmse(y_train, pred_train)
            rmse_val[epoch] = utils.rmse(y_test, pred_test)
            mae_train[epoch] = metrics.mean_absolute_error(y_train, pred_train)
            mae_val[epoch] = metrics.mean_absolute_error(y_test, pred_test)

    torch.save(
        model.state_dict(),
        os.path.join(data_dir, "python", "outputs", "models", "mlp7",
                     "nodropout", "sims_frac30", "tuned", "setting2",
                     f"model{i}.pth"))

    running_losses = {
        "rmse_train": rmse_train,
        "mae_train": mae_train,
        "rmse_val": rmse_val,
        "mae_val": mae_val
    }

    return running_losses, pred_test
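
A hypothetical call (not in the source); note that train_model prepends the input width to "dimensions" in place, so the list should start at the first hidden layer.

hparams_add = {"epochs": 1000, "learningrate": 1e-3,
               "batchsize": 128, "history": 0}
model_design_add = {"dimensions": [64, 32, 1],  # hidden -> hidden -> output; input width is inserted inside
                    "activation": nn.ReLU}

running_losses, pred_test = train_model(hparams_add, model_design_add,
                                        X, Y, X_test, Y_test, i=0,
                                        data_dir="outputs")  # placeholder directory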
Example #5
# (leading arguments of this first get_splits call are truncated in the source)
                                simulations=None)

X_test, Y_test = preprocessing.get_splits(sites=["hyytiala"],
                                          years=[2008],
                                          datadir=os.path.join(data_dir, "data"),
                                          dataset="profound",
                                          simulations=None)
#%%
m = nn.AdaptiveMaxPool1d(5)  # adaptively max-pools the last dimension to length 5
inp = torch.randn(1, 10, 8)
inp.shape                    # torch.Size([1, 10, 8])
out = m(inp)
out.shape                    # torch.Size([1, 10, 5])

#%%
x, y = utils.create_batches(X, Y, 64, 1)
x.shape
x = torch.tensor(x).type(dtype=torch.float)
fc = nn.Linear(1, 64)  # embeds each scalar feature into a 64-dim latent vector

ls = []
for i in range(x.shape[1]):
    # project each feature column (as a length-1 vector) separately
    latent = fc(x[:, i].unsqueeze(1))
    ls.append(latent)

torch.stack(ls).shape
#x.unsqueeze(1).shape
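
Since torch.stack adds a new leading dimension, the stacked tensor is (x.shape[1], batch, 64), assuming x is 2-D (batch, n_features); permuting back to batch-first may be more convenient (a sketch, with "latents" a new name):

latents = torch.stack(ls)           # (n_features, batch, 64)
latents = latents.permute(1, 0, 2)  # (batch, n_features, 64)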

#%%
latent = []