Example #1
def test_estimator_fit(dtype):
    data = deeptime.data.ellipsoids()
    obs = data.observations(60000, n_dim=2).astype(dtype)
    dataset = deeptime.data.TimeLaggedDataset.from_trajectory(1, obs)  # (x_t, x_{t+1}) pairs, 59999 in total
    train, val = torch.utils.data.random_split(dataset, [50000, 9999])

    # set up the lobe
    linear_layer = nn.Linear(2, 1)
    lobe = nn.Sequential(linear_layer, nn.Tanh())

    with torch.no_grad():
        linear_layer.weight[0, 0] = -0.3030
        linear_layer.weight[0, 1] = 0.3060
        linear_layer.bias[0] = -0.7392

    net = VAMPNet(lobe=lobe, dtype=dtype, learning_rate=1e-8)
    train_loader = create_timelagged_data_loader(train, lagtime=1, batch_size=512)
    val_loader = create_timelagged_data_loader(val, lagtime=1, batch_size=512)
    net.fit(train_loader, n_epochs=1, validation_data=val_loader, validation_score_callback=lambda *x: x)
    projection = net.transform(obs)

    # reference model w/o learnt featurization
    projection = VAMP(lagtime=1).fit(projection).fetch_model().transform(projection)

    dtraj = Kmeans(2).fit(projection).transform(projection)
    msm_vampnet = MaximumLikelihoodMSM().fit(dtraj, lagtime=1).fetch_model()

    np.testing.assert_array_almost_equal(msm_vampnet.transition_matrix, data.msm.transition_matrix, decimal=2)
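
The same workflow can be sketched without the test scaffolding. This is a minimal sketch, not the library's canonical recipe: the import path for VAMPNet is an assumption (adjust to your deeptime version), a plain torch DataLoader stands in for the create_timelagged_data_loader helper, and the learning rate is chosen arbitrarily.

import numpy as np
import torch
import torch.nn as nn

import deeptime
from deeptime.decomposition.deep import VAMPNet  # assumed import path

# Build (x_t, x_{t+1}) pairs the same way Example #1 does and feed them
# through a plain torch DataLoader.
data = deeptime.data.ellipsoids()
obs = data.observations(60000, n_dim=2).astype(np.float32)
dataset = deeptime.data.TimeLaggedDataset.from_trajectory(1, obs)
loader = torch.utils.data.DataLoader(dataset, batch_size=512, shuffle=True)

# Small learnable featurization ("lobe"), trained for a single epoch.
lobe = nn.Sequential(nn.Linear(2, 1), nn.Tanh())
net = VAMPNet(lobe=lobe, learning_rate=1e-3)  # learning rate chosen arbitrarily for the sketch
net.fit(loader, n_epochs=1)

projection = net.fetch_model().transform(obs)  # observations mapped through the trained lobe
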
Example #2
def test_estimator():
    data = deeptime.data.ellipsoids()
    obs = data.observations(60000, n_dim=10).astype(np.float32)

    # set up the lobe
    lobe = nn.Sequential(nn.Linear(10, 1), nn.Tanh())
    # train the lobe
    opt = torch.optim.Adam(lobe.parameters(), lr=5e-4)
    for _ in range(50):
        for X, Y in deeptime.data.timeshifted_split(obs, lagtime=1, chunksize=512):
            opt.zero_grad()
            # `loss` is provided by the enclosing test module; it is assumed to be the
            # VAMP-based loss evaluated on instantaneous and time-lagged lobe outputs.
            lval = loss(lobe(torch.from_numpy(X)), lobe(torch.from_numpy(Y)))
            lval.backward()
            opt.step()

    # now let's compare
    lobe.eval()
    loader = create_timelagged_data_loader(obs, lagtime=1, batch_size=512)
    vampnet = VAMPNet(lobe=lobe)
    vampnet_model = vampnet.fit(loader).fetch_model()
    # np.testing.assert_array_less(vamp_model.timescales()[0], vampnet_model.timescales()[0])

    projection = vampnet_model.transform(obs)
    # reference model w/o learnt featurization
    projection = VAMP(lagtime=1).fit(projection).fetch_model().transform(projection)

    dtraj = Kmeans(2).fit(projection).transform(projection)
    msm_vampnet = MaximumLikelihoodMSM().fit(dtraj, lagtime=1).fetch_model()

    np.testing.assert_array_almost_equal(msm_vampnet.transition_matrix, data.msm.transition_matrix, decimal=2)
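
The commented-out line above hints at comparing implied timescales against a classical VAMP model fitted on the raw coordinates. A minimal sketch of that reference model, assuming VAMP is importable from deeptime.decomposition as in the examples here:

import numpy as np
import deeptime
from deeptime.decomposition import VAMP  # assumed import location

# Reference Koopman model without a learnt featurization; its slowest implied
# timescale is what the commented-out assertion in Example #2 would compare
# against the VAMPNet model.
obs = deeptime.data.ellipsoids().observations(60000, n_dim=10).astype(np.float32)
vamp_model = VAMP(lagtime=1).fit(obs).fetch_model()
slowest_timescale = vamp_model.timescales()[0]
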
Example #3
def two_state_hmm():
    length = 1000
    batch_size = 64
    # two-state Markov chain with 10% switching probability between the states
    transition_matrix = np.asarray([[0.9, 0.1], [0.1, 0.9]])
    msm = dt.markov.msm.MarkovStateModel(transition_matrix)
    dtraj = msm.simulate(length, seed=42)
    # 1-d Gaussian emissions; state 1 is shifted by +20 so the two states are well separated
    traj = np.random.randn(len(dtraj))
    traj[np.where(dtraj == 1)[0]] += 20.0
    # embed the 1-d signal into two dimensions via a random rotation
    traj_stacked = np.vstack((traj, np.zeros(len(traj))))
    phi = np.random.rand() * 2.0 * np.pi
    rot = np.asarray([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
    traj_rot = np.dot(rot, traj_stacked).T

    return traj, traj_rot, create_timelagged_data_loader(traj_rot,
                                                         lagtime=1,
                                                         batch_size=batch_size)
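
A hypothetical usage of this fixture, assuming the returned loader yields (instantaneous, time-lagged) batches like the other loaders in these examples and that VAMPNet is importable as below; the fixture's trajectory is float64, hence the matching dtype, and the learning rate and epoch count are arbitrary.

import numpy as np
import torch.nn as nn
from deeptime.decomposition.deep import VAMPNet  # assumed import path

# The rotated trajectory is 2-dimensional, so a tiny lobe suffices to recover the
# 1-d coordinate separating the two metastable states.
traj, traj_rot, loader = two_state_hmm()
net = VAMPNet(lobe=nn.Sequential(nn.Linear(2, 1), nn.Tanh()), dtype=np.float64, learning_rate=1e-2)
model = net.fit(loader, n_epochs=5).fetch_model()
projection = model.transform(traj_rot)  # shape (1000, 1); should correlate with the hidden state sequence
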
Example #4
def test_no_side_effects():
    mlp = nn.Linear(10, 2)
    data = deeptime.data.ellipsoids()
    obs = data.observations(100, n_dim=10).astype(np.float32)
    net = VAMPNet(lobe=mlp, dtype=np.float32, learning_rate=1e-8)
    train_loader = create_timelagged_data_loader(obs,
                                                 lagtime=1,
                                                 batch_size=512)
    model1 = net.fit(train_loader, n_epochs=1).fetch_model()
    model2 = net.fit(train_loader, n_epochs=1).fetch_model()
    with torch.no_grad():
        assert_(model1.lobe
                is not model2.lobe)  # check it is not the same instance
        # no side effects: the second fit must not modify model1's lobe, so the
        # weights differ between the models and assert_equal is expected to raise
        with assert_raises(AssertionError):
            assert_equal(model1.lobe.weight.data.cpu().numpy(),
                         model2.lobe.weight.data.cpu().numpy())
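
Because fetch_model() evidently returns an independent copy of the lobe (which is exactly what this test checks), intermediate snapshots can be kept while training continues on the same estimator. A short sketch reusing net, train_loader and obs from Example #4:

# Keep one model snapshot per epoch; each snapshot retains the weights it was fetched with.
snapshots = [net.fit(train_loader, n_epochs=1).fetch_model() for _ in range(3)]
outputs = [m.transform(obs) for m in snapshots]  # per-snapshot projections of the data
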