Example 1
import pytest
import torch
from torchtuples import Model

# Method of a parametrized test class; `optim_class` ranges over torchtuples
# optimizer wrappers (e.g. tt.optim.SGD). `get_params` and
# `assert_tupletree_equal` are test helpers (sketched below).
def test_step_with_model(self, optim_class):
    torch.manual_seed(1234)
    inp, tar = torch.randn(10, 3), torch.randn(10, 1)
    net = torch.nn.Linear(3, 1)
    model = Model(net, torch.nn.MSELoss(), optim_class(lr=0.1))
    params = get_params(net)
    model.fit(inp, tar, verbose=False)
    new_params = get_params(net)
    # A single fit pass must update the weights, so the snapshots should differ.
    with pytest.raises(AssertionError) as ex:
        assert_tupletree_equal(params, new_params)
    assert str(ex.value) == "Not equal values"
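`get_params` and `assert_tupletree_equal` are helpers from the torchtuples test suite, not its public API. A minimal sketch of the behavior the test relies on, assuming `get_params` snapshots detached parameter copies and `assert_tupletree_equal` raises with the exact message checked above:

import torch

def get_params(net):
    # Snapshot detached copies so later optimizer steps don't mutate them.
    return tuple(p.detach().clone() for p in net.parameters())

def assert_tupletree_equal(a, b):
    # Raise the message the test asserts on whenever any paired tensors differ.
    for pa, pb in zip(a, b):
        if not torch.equal(pa, pb):
            raise AssertionError("Not equal values")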
Example 2
def test_readme_example():
    import torch
    from torch import nn
    from torchtuples import Model, optim
    torch.manual_seed(0)

    n = 500
    x0, x1, x2 = [torch.randn(n, 3) for _ in range(3)]
    y = torch.randn(n, 1)
    x = (x0, (x0, x1, x2))  # nested input: unpacked as forward(x0, (x0, x1, x2))

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.lin_tuple = nn.Linear(3, 2)
            self.lin_cat = nn.Linear(5, 1)
            self.relu = nn.ReLU()

        def forward(self, x_tensor, x_tuple):
            # Embed each tensor in the tuple with a shared layer, average the
            # results, then concatenate with the plain tensor input.
            x = [self.relu(self.lin_tuple(xi)) for xi in x_tuple]
            x = torch.stack(x).mean(0)
            x = torch.cat([x, x_tensor], dim=1)
            return self.lin_cat(x)
    
    model = Model(Net(), nn.MSELoss(), optim.SGD(0.01))
    log = model.fit(x, y, batch_size=64, epochs=5, verbose=False)
    preds = model.predict(x)
    assert preds is not None
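torchtuples unpacks the top-level input tuple into the network's positional arguments, which is why `Net.forward` takes two parameters while `fit` receives a single `x`. A quick check of that wiring, reusing `Net`, `n`, and the tensors from the example above:

net = Net()
out = net(x0, (x0, x1, x2))  # the unpacked call Model performs on each batch
assert out.shape == (n, 1)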
Example 3
import os
import numpy as np
import torch
import torchtuples as tt
# NKSDiscrete and the variables below (output_dir, dataset, emb_model, ...)
# are defined earlier in the surrounding script.

emb_model_filename = os.path.join(
    output_dir, 'models',
    'rsf_full_train_neural_approx_'
    '%s_exp%d_mf%d_msl%d_km%d_'
    'bs%d_nep%d_nla%d_nno%d_'
    'lr%f_test.pt'
    % (dataset, experiment_idx, max_features, min_samples_leaf,
       use_km, batch_size, 100, n_layers, n_nodes, lr))
# Fit the embedding network only if no cached weights exist; otherwise reload.
if not os.path.isfile(emb_model_filename):
    emb_model.fit(X_train_std, mds_embedding,
                  batch_size=batch_size, epochs=100,
                  verbose=False)
    emb_model.save_net(emb_model_filename)
else:
    emb_model.load_net(emb_model_filename)
emb_model.net.train()  # back to train mode before fine-tuning

print('*** Fine-tuning with DKSA...')
torch.manual_seed(fine_tune_random_seed + 1)
np.random.seed(fine_tune_random_seed + 1)
optimizer = tt.optim.Adam(lr=lr)
if num_durations > 0:
    # Discrete-time model: bin the continuous durations into `num_durations`
    # intervals; `labtrans.cuts` holds the resulting bin boundaries.
    labtrans = NKSDiscrete.label_transform(num_durations)
    y_train_discrete = labtrans.fit_transform(*y_train.T)
    surv_model = NKSDiscrete(emb_model.net, optimizer,
                             duration_index=labtrans.cuts)
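`NKSDiscrete.label_transform` appears to mirror the discrete-time label transform from pycox, which this code builds on. Assuming the two behave alike, a minimal sketch of what the transform does, using pycox's `LabTransDiscreteTime` directly:

import numpy as np
from pycox.preprocessing.label_transforms import LabTransDiscreteTime

durations = np.array([2.0, 5.0, 11.0, 30.0])
events = np.array([1, 0, 1, 1])

labtrans = LabTransDiscreteTime(4)  # 4 equidistant time bins
idx_durations, events_out = labtrans.fit_transform(durations, events)
print(labtrans.cuts)    # bin boundaries, analogous to duration_index above
print(idx_durations)    # integer bin index assigned to each subject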