def test_estimator(fixed_seed):
    data = deeptime.data.ellipsoids()
    obs = data.observations(60000, n_dim=10).astype(np.float32)
    # set up the lobe
    lobe = nn.Sequential(nn.Linear(10, 1), nn.Tanh())
    # train the lobe
    opt = torch.optim.Adam(lobe.parameters(), lr=5e-4)
    for _ in range(50):
        for X, Y in deeptime.data.timeshifted_split(obs, lagtime=1, chunksize=512):
            opt.zero_grad()
            lval = vampnet_loss(lobe(torch.from_numpy(X)), lobe(torch.from_numpy(Y)))
            lval.backward()
            opt.step()

    # now let's compare
    lobe.eval()
    ds = TrajectoryDataset(1, obs)
    loader = DataLoader(ds, batch_size=512)
    vampnet = VAMPNet(lobe=lobe)
    vampnet_model = vampnet.fit(loader).fetch_model()

    # reference model w/o learnt featurization
    projection = VAMP(lagtime=1, observable_transform=vampnet_model).fit(obs).transform(
        obs, propagate=True)
    dtraj = KMeans(2).fit(projection).transform(projection)
    msm_vampnet = MaximumLikelihoodMSM().fit(dtraj, lagtime=1).fetch_model()

    np.testing.assert_array_almost_equal(msm_vampnet.transition_matrix, data.msm.transition_matrix,
                                         decimal=2)
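
# The tests in this module take a `fixed_seed` fixture whose definition is not
# shown here. A minimal sketch of what such a fixture could look like, assuming
# it only needs to pin the Python, NumPy, and PyTorch RNGs so the stochastic
# training converges reproducibly (hypothetical; the real fixture would usually
# live in conftest.py and may seed additional generators):
import random

import numpy as np
import pytest
import torch


@pytest.fixture
def fixed_seed():
    random.seed(42)        # Python's built-in RNG
    np.random.seed(42)     # NumPy RNG used by deeptime's data generators
    torch.manual_seed(42)  # PyTorch RNG used for weight init and shuffling
    yield
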
def test_estimator_fit(fixed_seed, dtype):
    data = deeptime.data.ellipsoids()
    obs = data.observations(60000, n_dim=2).astype(dtype)
    train, val = torch.utils.data.random_split(
        deeptime.data.TimeLaggedDataset.from_trajectory(1, obs), [50000, 9999])

    # set up the lobe
    linear_layer = nn.Linear(2, 1)
    lobe = nn.Sequential(linear_layer, nn.Tanh())
    with torch.no_grad():
        linear_layer.weight[0, 0] = -0.3030
        linear_layer.weight[0, 1] = 0.3060
        linear_layer.bias[0] = -0.7392

    net = VAMPNet(lobe=lobe, dtype=dtype, learning_rate=1e-8)
    train_loader = DataLoader(train, batch_size=512, shuffle=True)
    val_loader = DataLoader(val, batch_size=512)
    net.fit(train_loader, n_epochs=1, validation_data=val_loader,
            validation_score_callback=lambda *x: x)

    # reference model w/o learnt featurization
    projection = VAMP(lagtime=1, observable_transform=net).fit(obs).fetch_model().transform(obs)
    dtraj = KMeans(2).fit(projection).transform(projection)
    msm_vampnet = MaximumLikelihoodMSM().fit(dtraj, lagtime=1).fetch_model()

    np.testing.assert_array_almost_equal(msm_vampnet.transition_matrix, data.msm.transition_matrix,
                                         decimal=2)
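
# test_estimator_fit above also receives a `dtype` fixture that is not defined
# in this section. A minimal sketch, under the assumption that it simply
# parametrizes the test over the two floating-point precisions VAMPNet supports
# (hypothetical; the real fixture may differ):
@pytest.fixture(params=[np.float32, np.float64])
def dtype(request):
    # Run the test once per precision so both the float32 and float64
    # code paths of VAMPNet are exercised.
    yield request.param
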
def test_no_side_effects():
    mlp = nn.Linear(10, 2)
    data = deeptime.data.ellipsoids()
    obs = data.observations(100, n_dim=10).astype(np.float32)
    net = VAMPNet(lobe=mlp, dtype=np.float32, learning_rate=1e-8)
    ds = TrajectoryDataset(1, obs)
    train_loader = DataLoader(ds, batch_size=512, shuffle=True)
    model1 = net.fit(train_loader, n_epochs=1).fetch_model()
    model2 = net.fit(train_loader, n_epochs=1).fetch_model()
    with torch.no_grad():
        assert_(model1.lobe is not model2.lobe)  # check it is not the same instance
def test_no_side_effects_data_loader():
    mlp = nn.Linear(10, 2)
    data = deeptime.data.ellipsoids()
    obs = data.observations(100, n_dim=10).astype(np.float32)
    net = VAMPNet(lobe=mlp, dtype=np.float32, learning_rate=1e-8)
    train_loader = create_timelagged_data_loader(obs, lagtime=1, batch_size=512)
    model1 = net.fit(train_loader, n_epochs=1).fetch_model()
    model2 = net.fit(train_loader, n_epochs=1).fetch_model()
    with torch.no_grad():
        assert_(model1.lobe is not model2.lobe)  # check it is not the same instance
        # no side effects: the two independently fitted lobes must not share weights,
        # so assert_equal is expected to raise
        with assert_raises(AssertionError):
            assert_equal(model1.lobe.weight.data.cpu().numpy(), model2.lobe.weight.data.cpu().numpy())
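
# The test above calls create_timelagged_data_loader(...), which is not defined
# in this section. A plausible sketch, assuming it merely builds time-lagged
# (x_t, x_{t+lagtime}) pairs with deeptime's TrajectoryDataset (here imported
# from deeptime.util.data) and wraps them in a standard torch DataLoader; the
# actual helper may differ:
from torch.utils.data import DataLoader
from deeptime.util.data import TrajectoryDataset


def create_timelagged_data_loader(trajectory, lagtime, batch_size, shuffle=True):
    # Pair each frame with its time-lagged counterpart and hand the resulting
    # dataset to a DataLoader for mini-batch training.
    dataset = TrajectoryDataset(lagtime, trajectory)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
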