Example n. 1
0
def train_lstm_total_dropout(dropout, learning_rate, patience, batch_size, n_layers, bidirectional, hidden_size, hidden_input_1, hidden_input_2, hidden_output, optimiser, pat_drop, sched_factor, N, dt, rho, net):
    """Train an LSTM work-prediction model and return (stopping epoch, validation loss).

    Parameters
    ----------
    dropout, learning_rate, patience, batch_size, n_layers, bidirectional,
    hidden_size, hidden_input_1, hidden_input_2, hidden_output :
        Architecture / optimisation hyper-parameters forwarded to LSTMNetwork.
    optimiser : str
        One of 'adam', 'sgd' or 'adagrad'.
    pat_drop : number
        Divisor applied to `patience` for the LR scheduler's patience.
    sched_factor : float
        Multiplicative LR decay factor for ReduceLROnPlateau.
    N, dt, rho, net :
        Dataset / physics parameters passed to import_datasets and WorkDataset.

    Raises
    ------
    ValueError
        If `optimiser` is not one of the supported names.

    NOTE(review): relies on module-level `N_sobol`, `runs` and `seed` — confirm
    they are defined in the enclosing module.
    """
    data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
    # 18% held out for test, then 10% of the remainder for validation.
    data_train, data_test = train_test_split(data, test_size=0.18, random_state=seed)
    data_train, data_valid = train_test_split(data_train, test_size=0.1, random_state=seed)
    train_set = WorkDataset(data_train[:, 0], N, net=net)
    test_set = WorkDataset(data_test[:, 0], N, net=net)
    valid_set = WorkDataset(data_valid[:, 0], N, net=net)

    # Seed before construction so weight initialisation is reproducible.
    torch.manual_seed(seed)
    model = LSTMNetwork(5, 4, hidden_size, [hidden_input_1, hidden_input_2], hidden_output, batch_size, n_layers, N, bidirectional, dropout, dt).double()

    if optimiser == 'adam':
        optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-9)
    elif optimiser == 'sgd':
        optimiser = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.99, dampening=0, weight_decay=0, nesterov=True)
    elif optimiser == 'adagrad':
        optimiser = torch.optim.Adagrad(model.parameters(), lr=learning_rate)
    else:
        # Fail fast instead of letting the string reach the scheduler below,
        # which would raise a confusing TypeError much later.
        raise ValueError(
            "Unknown optimiser %r; expected 'adam', 'sgd' or 'adagrad'" % (optimiser,))
    # ReduceLROnPlateau documents an integer patience; truncate the division
    # explicitly rather than passing a float through.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimiser, mode='min', factor=sched_factor, patience=int(patience / pat_drop))
    epoch = model.learn(train_set, valid_set, optimiser, scheduler, patience=patience)

    # Reload the checkpoint written during training and score it on validation.
    model = torch.load('best_model_custom_loss').eval()
    vloss = model.calc_loss(valid_set)
    # vwork = model.work_ratio(valid_set, dt)

    return epoch, vloss
Example n. 2
0
    def work_ratio(self, data, dt):
        """Score predicted protocols on *data*.

        Runs the network on the whole dataset, reconstructs the pulse angles
        and recomputes the work for each sample.

        Returns a tuple (mean predicted work, mean ratio of predicted work to
        the reference work stored in data[:, 2]).
        """
        work_set = WorkDataset(data, self.N, 'ann')
        with torch.no_grad():
            features = work_set.__getitem__(range(len(work_set)))['x']
            predictions = self.forward(features)

        # Undo the angle embedding, then recompute the work per sample.
        angles = rev_angle_embedding(predictions, self.N, reshape=True)
        E_pred = np.array([
            wrapper(angles[k], data[k, 0][:self.N], data[k, 0][self.N:],
                    dt, data[k, 3], self.N)
            for k in range(len(predictions))
        ])

        return np.mean(E_pred), np.mean(E_pred / data[:, 2])
Example n. 3
0
    def work_ratio(self, data, dt):
        """Score predicted protocols on *data* (custom-loss LSTM variant).

        Performs a full forward pass (with freshly initialised hidden/cell
        states), reconstructs the pulse angles and recomputes the work per
        sample.

        Returns a tuple (mean predicted work, mean ratio of predicted work to
        the reference work stored in data[:, 2]).
        """
        dataset = WorkDataset(data[:, 0], self.N, 'custom_loss')
        with torch.no_grad():
            inputs = dataset.__getitem__(range(len(dataset)))['x']
            hidden, cell = self.HiddenCellTest(len(inputs))
            y_pred, internals = self.forward(inputs, hidden, cell)

        protocols = rev_angle_embedding(y_pred, self.N)
        works = np.empty(len(y_pred))

        # Recompute the work for every predicted protocol.
        for idx, protocol in enumerate(protocols):
            works[idx] = wrapper(protocol, data[idx, 0][:self.N], data[idx, 0][self.N:], dt, data[idx, 3], self.N)

        return np.mean(works), np.mean(works / data[:, 2])
Example n. 4
0
# Evaluation settings for the dt = 5 run.
seed = 42
batch_size = 44
dt = 5
rho = 'eigen'
N_sobol = 45
runs = range(40)

# %%
# Load the dataset and carve out train / test / validation splits.
# NOTE(review): `N` is not defined in this snippet — presumably assigned
# earlier in the full script; confirm before running standalone.
data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
data_train, data_test = train_test_split(data,
                                         test_size=0.18,
                                         random_state=seed)
data_train, data_valid = train_test_split(data_train,
                                          test_size=0.1,
                                          random_state=seed)
train_set = WorkDataset(data_train, N, net='lstm')
test_set = WorkDataset(data_test, N, net='lstm')
valid_set = WorkDataset(data_valid, N, net='lstm')

# %%
# Evaluate a previously trained ANN model on the test split
# (bare expressions are REPL/notebook-style inspection).
ann = torch.load('models/N_5_ann')
ann
ann.work_ratio(data_test, dt)
ann.calc_loss(test_set)
dt
# %%
# Evaluate the bidirectional LSTM on the same test split.
bi = torch.load('models/N_5_rho_eigen_lstm').eval()
with torch.no_grad():
    biwr = bi.work_ratio(data_test, dt)
    biloss = bi.calc_loss(test_set)
Example n. 5
0
# Evaluation settings for the dt = 1 custom-loss models.
seed = 42
batch_size = 44
dt = 1
rho = 'eigen'
N_sobol = 45
runs = range(21)
# NOTE(review): elsewhere in this file the tag is spelled 'custom_loss'
# (underscore) — confirm WorkDataset accepts the hyphenated form.
net = 'custom-loss'
# %%
# NOTE(review): `N` is not defined in this snippet — presumably assigned
# earlier in the full script.
data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
data_train, data_test = train_test_split(data,
                                         test_size=0.18,
                                         random_state=seed)
data_train, data_valid = train_test_split(data_train,
                                          test_size=0.1,
                                          random_state=seed)
train_set = WorkDataset(data_train, N, net=net)
test_set = WorkDataset(data_test, N, net=net)
valid_set = WorkDataset(data_valid, N, net=net)
# %%
# Compare bidirectional vs unidirectional custom-loss models at dt = 1
# (bare calls are notebook-style inspection of the returned ratios).
dt_1_bi = torch.load('models/custom_loss_dt_1_bi')
dt_1_bi.work_ratio(data_test, dt)

dt_1_uni = torch.load('models/custom_loss_dt_1_uni')
dt_1_uni.work_ratio(data_test, dt)

# %%
# Fresh settings for a follow-up dt = 5 run; the snippet ends here.
N = 5
seed = 42
batch_size = 44
dt = 5
rho = 'eigen'
Example n. 6
0
from multiproc.pwc_helpers import wrapper
# model = SimpleRNN()

# Small N = 3 training setup for the SimpleRNN baseline.
N = 3
batch_size = 30
seed = 42
dt = 5
rho = 'eigen'
N_sobol = 10

# Only the first three runs are loaded here.
data = import_datasets('multi_train_data', N, dt, rho, N_sobol, [0, 1, 2])
data_train, data_test = train_test_split(data,
                                         test_size=0.18,
                                         random_state=seed)
# %%
train_set = WorkDataset(data_train, N, embed=True)
test_set = WorkDataset(data_test, N, embed=True)

dataloader = DataLoader(train_set,
                        batch_size=batch_size,
                        shuffle=True,
                        drop_last=True)
# Seed before model construction so weight initialisation is reproducible.
torch.manual_seed(seed)
model = SimpleRNN(4, 4, 10, batch_size)
model = model.double()

learning_rate = 1e-2
criterion = torch.nn.MSELoss()
# NOTE(review): this call is truncated at the snippet boundary — the closing
# parenthesis and any remaining arguments are not visible here.
optimiser = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             betas=(0.9, 0.98),
Example n. 7
0
# Training settings for the N = 5 LSTM run.
batch_size = 30
seed = 42
learning_rate = 1e-2
n_layers = 2
# %%
N = 5
dt = 5
N_sobol = 15
runs = range(30)
rho = 'eigen'
net = 'lstm'

# Load data and build train / test / validation splits.
data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
data_train, data_test = train_test_split(data, test_size=0.18, random_state=seed)
data_train, data_valid = train_test_split(data_train, test_size=0.1, random_state=seed)
train_set = WorkDataset(data_train, N, net)#, embed=True)
test_set = WorkDataset(data_test, N, net)#, embed=True)
valid_set = WorkDataset(data_valid, N, net)#, N, embed=True)

# Seed before model construction so weight initialisation is reproducible.
torch.manual_seed(seed)
model = LSTMNetwork(4, 2, 100, batch_size, n_layers, N).double()


optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-9)

# %%
# NOTE(review): other snippets call `model.learn(...)` for training; `train`
# here shadows torch.nn.Module.train(mode) — confirm LSTMNetwork actually
# overrides it with this signature.
model.train(train_set, valid_set, optimiser, patience=30)

# %%

model.work_ratio(data_test, 5)
Example n. 8
0
# Compare unidirectional vs bidirectional LSTM work predictions at dt = 1.
N = 5
seed = 42
batch_size = 44
dt = 1
rho = 'eigen'
N_sobol = 45
runs = range(21)
# %%
data = import_datasets('multi_train_data', N, dt, rho, N_sobol, runs)
data_train, data_test = train_test_split(data,
                                         test_size=0.18,
                                         random_state=seed)
data_train, data_valid = train_test_split(data_train,
                                          test_size=0.1,
                                          random_state=seed)
test_set = WorkDataset(data_test, N, 'lstm')

# %%
# Import models
uni = torch.load('models/dt_1_uni').eval()
bi = torch.load('models/dt_1_bi').eval()
# %%
with torch.no_grad():
    uni_pred = uni.get_work_array(data_test, dt)
    bi_pred = bi.get_work_array(data_test, dt)

# Per-sample difference between the two models' predicted work.
delta = uni_pred - bi_pred
plt.hist(delta)

# Index of the sample sitting one past the 2/3 point of the sorted differences
# (notebook-style bare expression for inspection).
delta.argsort()[2 * len(delta) // 3 + 1]