Example #1
def train_lstm_total_dropout(dropout, learning_rate, patience, batch_size, n_layers, bidirectional, hidden_size, hidden_input_1, hidden_input_2, hidden_output, optimiser, pat_drop, sched_factor, N, dt, rho, net):
    """Train an LSTMNetwork with the given hyperparameters and return the number of
    training epochs and the final validation loss. Relies on the module-level
    globals N_sobol, runs and seed being defined."""

    data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
    data_train, data_test = train_test_split(data, test_size=0.18, random_state=seed)
    data_train, data_valid = train_test_split(data_train, test_size=0.1, random_state=seed)
    train_set = WorkDataset(data_train[:, 0], N, net=net)
    test_set = WorkDataset(data_test[:, 0], N, net=net)
    valid_set = WorkDataset(data_valid[:, 0], N, net=net)

    torch.manual_seed(seed)
    model = LSTMNetwork(5, 4, hidden_size, [hidden_input_1, hidden_input_2], hidden_output, batch_size, n_layers, N, bidirectional, dropout, dt).double()

    if optimiser == 'adam':
        optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-9)
    elif optimiser == 'sgd':
        optimiser = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.99, dampening=0, weight_decay=0, nesterov=True)
    elif optimiser == 'adagrad':
        optimiser = torch.optim.Adagrad(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimiser, mode='min', factor=sched_factor, patience=patience/pat_drop)
    epoch = model.learn(train_set, valid_set, optimiser, scheduler, patience=patience)

    model = torch.load('best_model_custom_loss').eval()
    vloss = model.calc_loss(valid_set)
    # vwork = model.work_ratio(valid_set, dt)

    return epoch, vloss
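
A hedged usage sketch for the function above; the hyperparameter values are placeholders for illustration, not values taken from the project, and the module-level globals N_sobol, runs and seed must already be set.

# Placeholder hyperparameters, for illustration only
epoch, vloss = train_lstm_total_dropout(
    dropout=0.1, learning_rate=1e-3, patience=30, batch_size=44,
    n_layers=2, bidirectional=True, hidden_size=20,
    hidden_input_1=40, hidden_input_2=40, hidden_output=40,
    optimiser='adam', pat_drop=2, sched_factor=0.5,
    N=5, dt=5, rho='eigen', net='lstm')
print(epoch, vloss)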
Example #2
File: calc_eff_dt_5.py, Project: fsoest/ba
from multiproc.data_preprocessing import import_datasets
from sklearn.model_selection import train_test_split
from train_lstm import train_lstm_total_dropout as train_lstm
from dataset import WorkDataset
import numpy as np
import torch
from lower_bound import lower_bound

N = 5
seed = 42
batch_size = 44
dt = 5
rho = 'eigen'
N_sobol = 45
runs = range(40)

# %%
data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
data_train, data_test = train_test_split(data,
                                         test_size=0.18,
                                         random_state=seed)
data_train, data_valid = train_test_split(data_train,
                                          test_size=0.1,
                                          random_state=seed)
train_set = WorkDataset(data_train, N, net='lstm')
test_set = WorkDataset(data_test, N, net='lstm')
valid_set = WorkDataset(data_valid, N, net='lstm')

# %%
ann = torch.load('models/N_5_ann')
# Bare expressions below display their values when the cell is run interactively
ann
ann.work_ratio(data_test, dt)
ann.calc_loss(test_set)
Example #3
from dataset import WorkDataset
from torch.utils.data import DataLoader
import torch
from multiproc.data_preprocessing import import_datasets, rev_angle_embedding
from sklearn.model_selection import train_test_split
from multiproc.pwc_helpers import wrapper
# model = SimpleRNN()

N = 3
batch_size = 30
seed = 42
dt = 5
rho = 'eigen'
N_sobol = 10

data = import_datasets('multi_train_data', N, dt, rho, N_sobol, [0, 1, 2])
data_train, data_test = train_test_split(data,
                                         test_size=0.18,
                                         random_state=seed)
# %%
train_set = WorkDataset(data_train, N, embed=True)
test_set = WorkDataset(data_test, N, embed=True)

dataloader = DataLoader(train_set,
                        batch_size=batch_size,
                        shuffle=True,
                        drop_last=True)
torch.manual_seed(seed)
# SimpleRNN is defined elsewhere in the project; import it before running this snippet
model = SimpleRNN(4, 4, 10, batch_size)
model = model.double()
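
A minimal training-loop sketch showing how the dataloader and model above might be wired together. It assumes the dataset yields (input, target) pairs of double tensors and that SimpleRNN's forward accepts the input batch directly; the MSE loss and optimiser settings are illustrative, not the project's custom work-based loss.

# Sketch only: loss function, optimiser settings and the (input, target)
# batch unpacking are assumptions, not taken from the project.
criterion = torch.nn.MSELoss()
optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(10):
    for x, y in dataloader:
        optimiser.zero_grad()
        prediction = model(x)
        loss = criterion(prediction, y)
        loss.backward()
        optimiser.step()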