def test_readme_example():
    """Smoke-test the README example: fit a net on tuple-structured input."""
    import torch
    from torch import nn
    from torchtuples import Model, optim
    torch.manual_seed(0)

    n = 500
    # Three feature tensors plus a target; input is a nested tuple.
    x0, x1, x2 = [torch.randn(n, 3) for _ in range(3)]
    y = torch.randn(n, 1)
    x = (x0, (x0, x1, x2))

    class Net(nn.Module):
        """Encodes each tuple member, mean-pools, then concatenates with x_tensor."""

        def __init__(self):
            super().__init__()
            self.lin_tuple = nn.Linear(3, 2)
            self.lin_cat = nn.Linear(5, 1)
            self.relu = nn.ReLU()

        def forward(self, x_tensor, x_tuple):
            encoded = [self.relu(self.lin_tuple(part)) for part in x_tuple]
            pooled = torch.stack(encoded).mean(0)
            combined = torch.cat([pooled, x_tensor], dim=1)
            return self.lin_cat(combined)

    model = Model(Net(), nn.MSELoss(), optim.SGD(0.01))
    log = model.fit(x, y, batch_size=64, epochs=5, verbose=False)
    preds = model.predict(x)
    assert preds is not None
# Example 2
 def test_predict_net_eval_(self, eval_):
     """predict must not leave the net in eval mode; determinism tracks eval_."""
     torch.manual_seed(0)
     model = Model(self.net_dropout)
     assert model.net.training is True
     first = model.predict(self.data, eval_=eval_)
     # Training flag must be restored after the prediction pass.
     assert model.net.training is True
     second = model.predict(self.data, eval_=eval_)
     assert model.net.training is True
     # With dropout left active (eval_=False) repeated passes differ;
     # in eval mode the two predictions are identical.
     identical = (first == second).all().item()
     assert identical is eval_
# Example 3
 def test_step_with_model(self, optim_class):
     """One round of fitting must change the network's parameters."""
     torch.manual_seed(1234)
     features, targets = torch.randn(10, 3), torch.randn(10, 1)
     net = torch.nn.Linear(3, 1)
     model = Model(net, torch.nn.MSELoss(), optim_class(lr=0.1))
     before = get_params(net)
     model.fit(features, targets, verbose=False)
     after = get_params(net)
     # Parameters moved, so the equality check on the param trees must fail.
     with pytest.raises(AssertionError) as ex:
         assert_tupletree_equal(before, after)
     assert str(ex.value) == "Not equal values"
# Example 4
 def test_predict_no_fun(self, dl, numpy):
     """Compare predict vs predict_net for a net with and without a predict method."""
     source = self.dataloader if dl else self.data
     plain = Model(self.net, nn.MSELoss())
     custom = Model(self.prednet, nn.MSELoss())
     out_plain = plain.predict(source, numpy=numpy)
     out_custom = custom.predict(source, numpy=numpy)
     net_plain = plain.predict_net(source, numpy=numpy)
     net_custom = custom.predict_net(source, numpy=numpy)
     # prednet's own predict diverges from the plain net's output,
     # while predict_net (forward only) agrees across both models.
     assert not (out_plain == out_custom).all()
     assert (net_plain == net_custom).all()
     assert (out_plain == net_plain).all()
# Example 5
# Fit a small MLP to approximate the MDS embedding of the training data.
print('*** Fitting neural net to MDS transformation...')
# Seed both torch and numpy so the fit is reproducible.
torch.manual_seed(fine_tune_random_seed)
np.random.seed(fine_tune_random_seed)
batch_norm = True
dropout = 0.
output_bias = True
optimizer = tt.optim.Adam(lr=lr)
# MLP: input width = #features of the standardized training matrix,
# n_layers hidden layers of n_nodes each, output width = MDS size.
net = tt.practical.MLPVanilla(X_train_std.shape[1],
                              [n_nodes for layer_idx
                               in range(n_layers)],
                              mds_size,
                              batch_norm,
                              dropout,
                              output_bias=output_bias)
loss = nn.MSELoss()
emb_model = Model(net, loss, optimizer)

# Cache filename encodes experiment settings; NOTE(review): abbreviations
# presumably mean mf=max_features, msl=min_samples_leaf, km=use_km,
# bs=batch size, nep=epochs, nla=#layers, nno=#nodes — confirm upstream.
emb_model_filename = \
    os.path.join(output_dir, 'models',
                 'rsf_full_train_neural_approx_'
                 +
                 '%s_exp%d_mf%d_msl%d_km%d_'
                 % (dataset, experiment_idx,
                    max_features, min_samples_leaf, use_km)
                 +
                 'bs%d_nep%d_nla%d_nno%d_'
                 % (batch_size, 100, n_layers, n_nodes)
                 +
                 'lr%f_test.pt' % lr)
# Only fit when no cached model exists (fit call is truncated in this chunk).
if not os.path.isfile(emb_model_filename):
    emb_model.fit(X_train_std, mds_embedding,
# Example 6
# Rebuild the same MLP architecture used for the MDS approximation and
# load previously fitted weights from disk (fitting must already be done).
mds_size = min(len(X_train), X_train.shape[1])
# Seed torch (CPU and all CUDA devices) and numpy for reproducibility.
torch.manual_seed(fine_tune_random_seed)
torch.cuda.manual_seed_all(fine_tune_random_seed)
np.random.seed(fine_tune_random_seed)
batch_norm = True
dropout = 0.
output_bias = True
optimizer = tt.optim.Adam(lr=lr)
# Architecture must match the one the checkpoint was saved with.
net = tt.practical.MLPVanilla(X_train_std.shape[1],
                              [n_nodes for layer_idx in range(n_layers)],
                              mds_size,
                              batch_norm,
                              dropout,
                              output_bias=output_bias)
loss = nn.MSELoss()
emb_model = Model(net, loss, optimizer)

# Cache filename encodes experiment settings; NOTE(review): abbreviations
# presumably mean mf=max_features, msl=min_samples_leaf, bs=batch size,
# nep=epochs, nla=#layers, nno=#nodes — confirm against the fitting script.
emb_model_filename = \
    os.path.join(output_dir, 'models',
                 'rsf_full_train_neural_approx_'
                 +
                 '%s_exp%d_%s_mf%d_msl%d_'
                 % (dataset, experiment_idx, init_val_string,
                    max_features, min_samples_leaf)
                 +
                 'bs%d_nep%d_nla%d_nno%d_'
                 % (batch_size, 100, n_layers, n_nodes)
                 +
                 'lr%f_test.pt' % lr)
# The checkpoint must already exist; this script only loads, never fits.
assert os.path.isfile(emb_model_filename)
emb_model.load_net(emb_model_filename)
# Example 7
 def test_score_in_batches(self):
     """Unpacked args and a single data tuple must score identically."""
     model = Model(self.net, torch.nn.MSELoss())
     score_unpacked = model.score_in_batches(*self.data)
     score_packed = model.score_in_batches(self.data)
     assert score_unpacked == score_packed
# Example 8
 def test_set_torch_optimizer(self, optimizer_class, lr):
     """A raw torch optimizer is wrapped in OptimWrap but keeps its lr."""
     torch_opt = optimizer_class(self.net.parameters(), lr=lr)
     model = Model(self.net, None, torch_opt)
     wrapper = model.optimizer
     assert type(wrapper) is optim.OptimWrap
     # The wrapped object is the very optimizer class we passed in.
     assert type(wrapper.optimizer) is optimizer_class
     assert wrapper.param_groups[0]['lr'] == lr
# Example 9
 def test_set_optimizer(self, optim_class, lr):
     """torchtuples optimizers are used as-is, whether instantiated or not."""
     with_lr = Model(self.net, None, optim_class(lr))
     assert type(with_lr.optimizer) is optim_class
     assert with_lr.optimizer.param_groups[0]['lr'] == lr
     # Passing the class itself (no lr) must also work.
     without_lr = Model(self.net, None, optim_class)
     assert type(without_lr.optimizer) is optim_class