Example #1
def test_score_1():
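    # scoring the training sequences should reproduce the score_ attribute
    # that was stored during fit()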
    grid = NDGrid(n_bins_per_feature=5, min=-np.pi, max=np.pi)
    trajs = DoubleWell(random_state=0).get_cached().trajectories
    seqs = grid.fit_transform(trajs)
    model = (ContinuousTimeMSM(verbose=False, lag_time=10, n_timescales=3)
             .fit(seqs))
    np.testing.assert_approx_equal(model.score(seqs), model.score_)
Example #2
def test_uncertainties_backward():
    n = 4
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(verbose=False).fit(seqs)
    sigma_ts = model.uncertainty_timescales()
    sigma_lambda = model.uncertainty_eigenvalues()
    sigma_pi = model.uncertainty_pi()
    sigma_K = model.uncertainty_K()
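    # the checks below compare each uncertainty estimate against
    # previously recorded reference values for this random seed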

    yield lambda: np.testing.assert_array_almost_equal(
        sigma_ts, [9.13698928, 0.12415533, 0.11713719])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_lambda, [1.76569687e-19, 7.14216858e-05, 3.31210649e-04, 3.55556718e-04])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_pi, [0.00741467, 0.00647945, 0.00626743, 0.00777847])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_K,
        [[  3.39252419e-04, 3.39246173e-04, 0.00000000e+00, 1.62090239e-06],
         [  3.52062861e-04, 3.73305510e-04, 1.24093936e-04, 0.00000000e+00],
         [  0.00000000e+00, 1.04708186e-04, 3.45098923e-04, 3.28820213e-04],
         [  1.25455972e-06, 0.00000000e+00, 2.90118599e-04, 2.90122944e-04]])
    yield lambda: np.testing.assert_array_almost_equal(
        model.ratemat_,
        [[ -2.54439564e-02, 2.54431791e-02,  0.00000000e+00,  7.77248586e-07],
         [  2.64044208e-02,-2.97630373e-02,  3.35861646e-03,  0.00000000e+00],
         [  0.00000000e+00, 2.83988103e-03, -3.01998380e-02,  2.73599570e-02],
         [  6.01581838e-07, 0.00000000e+00,  2.41326592e-02, -2.41332608e-02]])
Example #3
def test_score_3():
    import warnings
    warnings.simplefilter('ignore')
    from msmbuilder.example_datasets.muller import MULLER_PARAMETERS as PARAMS

    cluster = NDGrid(n_bins_per_feature=6,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])

    ds = MullerPotential(random_state=0).get()['trajectories']
    assignments = cluster.fit_transform(ds)

    train_indices = [9, 4, 3, 6, 2]
    test_indices = [8, 0, 5, 7, 1]
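    # train_indices and test_indices form a disjoint split of the trajectories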

    model = ContinuousTimeMSM(lag_time=3,
                              n_timescales=1,
                              sliding_window=False,
                              ergodic_cutoff=1)
    train_data = [assignments[i] for i in train_indices]
    test_data = [assignments[i] for i in test_indices]

    model.fit(train_data)
    train = model.score_
    test = model.score(test_data)
    print(train, test)
Example #4
def test_score_3():
    ds = MullerPotential(random_state=0).get_cached().trajectories
    cluster = NDGrid(n_bins_per_feature=6,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])

    assignments = cluster.fit_transform(ds)

    train_indices = [9, 4, 3, 6, 2]
    test_indices = [8, 0, 5, 7, 1]
    temp = '1.0929e-02  5.4147e-02  9.8362e-02  0.1000e+00  6.0455e-02  2.8775e-02\
  6.6456e-02  3.3957e-02  4.1484e-03  0.1000e+00  5.0847e-02  1.1516e-02\
  3.5266e-02  1.2830e-02  0.1000e+00  2.1801e-02  1.6639e-02  9.4932e-03\
  0.1000e+00  0.1000e+00  1.1050e-01  4.0076e-03  0.1000e+00  0.1000e+00\
  1.8930e-02 -7.1060e+00 -4.5787e+00 -2.4950e+00 -4.0964e+00 -7.4127e+00\
 -6.7574e+00 -4.7137e+00 -3.9530e+00 -4.5781e+00 -7.4585e+00 -6.4634e+00\
 -5.8060e+00 -5.4783e+00 -5.3519e+00 -7.4653e+00 -6.5113e+00 -2.1477e+00\
 -4.8138e+00 -9.7187e+00 -9.0358e+00 -1.4599e+00 -8.8985e-01 -8.3461e+00\
 -7.0930e+00 -2.7618e+00 -6.7421e+00'

    model = PESContinuousTimeMSM(lag_time=3, n_timescales=1, sliding_window=False,
                                 ergodic_cutoff=1)
    model.theta_ = list(map(np.float64, temp.split()))
    train_data = [assignments[i] for i in train_indices]
    test_data = [assignments[i] for i in test_indices]

    model.fit(train_data)
    print(model.summarize())
    train = model.score_
    test = model.score(test_data)
    print(train, test)
Example #5
def test_5():
    grid = NDGrid(n_bins_per_feature=2)
    seqs = grid.fit_transform(load_quadwell(random_state=0)['trajectories'])

    model2 = BayesianContinuousTimeMSM(n_samples=100).fit(seqs)

    print(model2.summarize())
Example #6
def test_uncertainties_backward():
    n = 4
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    trajs = DoubleWell(random_state=0).get_cached().trajectories
    seqs = grid.fit_transform(trajs)

    model = PESContinuousTimeMSM(verbose=False).fit(seqs)
    sigma_ts = model.uncertainty_timescales()
    sigma_lambda = model.uncertainty_eigenvalues()
    sigma_pi = model.uncertainty_pi()
    sigma_K = model.uncertainty_K()

    yield lambda: np.testing.assert_array_almost_equal(
        sigma_ts, [9.508936, 0.124428, 0.117638])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_lambda,
        [1.76569687e-19, 7.14216858e-05, 3.31210649e-04, 3.55556718e-04])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_pi, [0.007496, 0.006564, 0.006348, 0.007863])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_K,
        [[0.000339, 0.000339, 0., 0.],
         [0.000352, 0.000372, 0.000122, 0.],
         [0., 0.000103, 0.000344, 0.000329],
         [0., 0., 0.00029, 0.00029]])
    yield lambda: np.testing.assert_array_almost_equal(
        model.ratemat_,
        [[-0.0254, 0.0254, 0., 0.],
         [0.02636, -0.029629, 0.003269, 0.],
         [0., 0.002764, -0.030085, 0.027321],
         [0., 0., 0.024098, -0.024098]])
Example #7
def test_uncertainties_backward():
    n = 4
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(verbose=False).fit(seqs)
    sigma_ts = model.uncertainty_timescales()
    sigma_lambda = model.uncertainty_eigenvalues()
    sigma_pi = model.uncertainty_pi()
    sigma_K = model.uncertainty_K()

    yield lambda: np.testing.assert_array_almost_equal(
        sigma_ts, [9.13698928, 0.12415533, 0.11713719])
    yield lambda: np.testing.assert_array_almost_equal(sigma_lambda, [
        1.76569687e-19, 7.14216858e-05, 3.31210649e-04, 3.55556718e-04
    ])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_pi, [0.00741467, 0.00647945, 0.00626743, 0.00777847])
    yield lambda: np.testing.assert_array_almost_equal(sigma_K, [
        [3.39252419e-04, 3.39246173e-04, 0.00000000e+00, 1.62090239e-06],
        [3.52062861e-04, 3.73305510e-04, 1.24093936e-04, 0.00000000e+00],
        [0.00000000e+00, 1.04708186e-04, 3.45098923e-04, 3.28820213e-04],
        [1.25455972e-06, 0.00000000e+00, 2.90118599e-04, 2.90122944e-04]
    ])
    yield lambda: np.testing.assert_array_almost_equal(model.ratemat_, [
        [-2.54439564e-02, 2.54431791e-02, 0.00000000e+00, 7.77248586e-07],
        [2.64044208e-02, -2.97630373e-02, 3.35861646e-03, 0.00000000e+00],
        [0.00000000e+00, 2.83988103e-03, -3.01998380e-02, 2.73599570e-02],
        [6.01581838e-07, 0.00000000e+00, 2.41326592e-02, -2.41332608e-02]
    ])
Example #8
def test_5():
    grid = NDGrid(n_bins_per_feature=2)
    seqs = grid.fit_transform(load_quadwell(random_state=0)['trajectories'])

    model2 = BayesianContinuousTimeMSM(n_samples=100).fit(seqs)

    print(model2.summarize())
Example #9
def test_score_2():
    ds = MullerPotential(random_state=0).get_cached().trajectories
    cluster = NDGrid(n_bins_per_feature=6,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])
    assignments = cluster.fit_transform(ds)
    test_indices = [5, 0, 4, 1, 2]
    train_indices = [3, 6, 7, 8, 9]

    model = PESContinuousTimeMSM(lag_time=3, n_timescales=1)
    temp='3.13268076e-02  1.45678356e-01  3.12558665e-01  0.10000000e+00\
  2.72951862e-02  9.64773504e-02  1.26398091e-01  1.27775726e-01\
  2.74208403e-03  9.57265955e-04  1.89498433e-01  0.10010000e+00\
  9.73644477e-02  4.16877008e-02  0.10010000e+00  1.73803374e-01\
  4.31281724e-02  0.10001000e+00  0.10010000e+00  4.03763450e-01\
  0.10001000e+00  0.10001000e+00  0.10010000e+00  2.78156537e-01\
 -7.75852152e+00 -4.77716045e+00 -2.67428479e+00 -4.33901900e+00\
 -9.23925293e+00 -6.65216281e+00 -4.88309143e+00 -4.04247463e+00\
 -4.67140081e+00 -7.95471679e+00 -6.26342874e+00 -6.02515423e+00\
 -5.64532492e+00 -5.56770596e+00 -7.66164067e+00 -6.22050765e+00\
 -2.12577068e+00 -4.84152585e+00 -9.21360166e+00 -1.43207874e+00\
 -8.55459835e-01 -9.21329384e+00 -6.99418825e+00 -2.73060233e+00\
 -6.60364249e+00'
    model.theta_= list(map(np.float64, temp.split()))

    model.fit([assignments[i] for i in train_indices])
    print('Initial theta: \n')
    print(model._initial_guess(model.countsmat_))
    test = model.score([assignments[i] for i in test_indices])
    train = model.score_
    print('train', train, 'test', test)
    print(model.optimizer_state_)
    # print(model.summarize())
    assert 1 <= test < 2
    assert 1 <= train < 2
Example #10
def test_score_1():
    grid = NDGrid(n_bins_per_feature=5, min=-np.pi, max=np.pi)
    trajs = DoubleWell(random_state=0).get_cached().trajectories
    seqs = grid.fit_transform(trajs)
    model = (PESContinuousTimeMSM(verbose=False, lag_time=10, n_timescales=3)
             .fit(seqs))
    np.testing.assert_approx_equal(model.score(seqs), model.score_)
Example #11
def test_uncertainties_backward():
    n = 4
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    trajs = DoubleWell(random_state=0).get_cached().trajectories
    seqs = grid.fit_transform(trajs)

    model = ContinuousTimeMSM(verbose=False).fit(seqs)
    sigma_ts = model.uncertainty_timescales()
    sigma_lambda = model.uncertainty_eigenvalues()
    sigma_pi = model.uncertainty_pi()
    sigma_K = model.uncertainty_K()

    yield lambda: np.testing.assert_array_almost_equal(
        sigma_ts, [9.508936, 0.124428, 0.117638])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_lambda,
        [1.76569687e-19, 7.14216858e-05, 3.31210649e-04, 3.55556718e-04])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_pi, [0.007496, 0.006564, 0.006348, 0.007863])
    yield lambda: np.testing.assert_array_almost_equal(
        sigma_K,
        [[0.000339, 0.000339, 0., 0.],
         [0.000352, 0.000372, 0.000122, 0.],
         [0., 0.000103, 0.000344, 0.000329],
         [0., 0., 0.00029, 0.00029]])
    yield lambda: np.testing.assert_array_almost_equal(
        model.ratemat_,
        [[-0.0254, 0.0254, 0., 0.],
         [0.02636, -0.029629, 0.003269, 0.],
         [0., 0.002764, -0.030085, 0.027321],
         [0., 0., 0.024098, -0.024098]])
Example #12
def test_optimize_1():
    n = 100
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(use_sparse=True, verbose=True).fit(seqs)

    y, x, n = model.loglikelihoods_.T
    x = x-x[0]
    cross = np.min(np.where(n==n[-1])[0])
Example #13
def test_doublewell():
    trjs = load_doublewell(random_state=0)['trajectories']
    for n_states in [10, 50]:
        clusterer = NDGrid(n_bins_per_feature=n_states)
        assignments = clusterer.fit_transform(trjs)

        for sliding_window in [True, False]:
            model = ContinuousTimeMSM(lag_time=100, sliding_window=sliding_window)
            model.fit(assignments)
            assert model.optimizer_state_.success
Example #14
def test_doublewell():
    trjs = load_doublewell(random_state=0)['trajectories']
    for n_states in [10, 50]:
        clusterer = NDGrid(n_bins_per_feature=n_states)
        assignments = clusterer.fit_transform(trjs)

        for sliding_window in [True, False]:
            model = ContinuousTimeMSM(lag_time=100,
                                      sliding_window=sliding_window)
            model.fit(assignments)
            assert model.optimizer_state_.success
Example #15
def test_doublewell():
    trjs = DoubleWell(random_state=0).get_cached().trajectories
    for n_states in [10, 36]:
        clusterer = NDGrid(n_bins_per_feature=n_states)
        assignments = clusterer.fit_transform(trjs)

        for sliding_window in [True, False]:
            model = PESContinuousTimeMSM(lag_time=100,
                                         sliding_window=sliding_window)
            model.fit(assignments)
            # print(model.summarize())
            print(model.optimizer_state_)
            assert model.optimizer_state_.success
Example #16
def test_hessian():
    grid = NDGrid(n_bins_per_feature=10, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])
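    # keep only the first ten discretized trajectories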
    seqs = [seqs[i] for i in range(10)]

    lag_time = 10
    model = ContinuousTimeMSM(verbose=True, lag_time=lag_time)
    model.fit(seqs)
    msm = MarkovStateModel(verbose=False, lag_time=lag_time)
    print(model.summarize())
    print('MSM timescales\n', msm.fit(seqs).timescales_)
    print('Uncertainty K\n', model.uncertainty_K())
    print('Uncertainty pi\n', model.uncertainty_pi())
Example #17
def test_hessian_3():
    grid = NDGrid(n_bins_per_feature=4, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])
    seqs = [seqs[i] for i in range(10)]

    lag_time = 10
    model = ContinuousTimeMSM(verbose=False, lag_time=lag_time)
    model.fit(seqs)
    msm = MarkovStateModel(verbose=False, lag_time=lag_time)
    print(model.summarize())
    # print('MSM timescales\n', msm.fit(seqs).timescales_)
    print('Uncertainty K\n', model.uncertainty_K())
    print('Uncertainty eigs\n', model.uncertainty_eigenvalues())
Example #18
def test_hessian_3():
    grid = NDGrid(n_bins_per_feature=4, min=-np.pi, max=np.pi)
    trajs = DoubleWell(random_state=0).get_cached().trajectories
    seqs = grid.fit_transform(trajs)
    seqs = [seqs[i] for i in range(10)]

    lag_time = 10
    model = ContinuousTimeMSM(verbose=False, lag_time=lag_time)
    model.fit(seqs)
    msm = MarkovStateModel(verbose=False, lag_time=lag_time)
    print(model.summarize())
    # print('MSM timescales\n', msm.fit(seqs).timescales_)
    print('Uncertainty K\n', model.uncertainty_K())
    print('Uncertainty eigs\n', model.uncertainty_eigenvalues())
Example #19
def test_5():
    trjs = DoubleWell(random_state=0).get_cached().trajectories
    clusterer = NDGrid(n_bins_per_feature=5)
    mle_msm = MarkovStateModel(lag_time=100, verbose=False)
    b_msm = BayesianMarkovStateModel(lag_time=100, n_samples=1000, n_chains=8, n_steps=1000, random_state=0)

    states = clusterer.fit_transform(trjs)
    b_msm.fit(states)
    mle_msm.fit(states)

    # this is a pretty silly test. it checks that the mean transition
    # matrix is not so dissimilar from the MLE transition matrix.
    # This shouldn't necessarily be the case anyways -- the likelihood is
    # not "symmetric". And the cutoff chosen is just heuristic.
    assert np.linalg.norm(b_msm.all_transmats_.mean(axis=0) - mle_msm.transmat_) < 1e-2
Example #20
def test_hessian_1():
    n = 5
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(use_sparse=False).fit(seqs)
    theta = model.theta_
    C = model.countsmat_
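    # compare the analytic hessian with a numerical Jacobian of the
    # gradient (second return value of _ratematrix.loglikelihood)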

    hessian1 = _ratematrix.hessian(theta, C, n)
    Hfun = nd.Jacobian(lambda x: _ratematrix.loglikelihood(x, C, n)[1])
    hessian2 = Hfun(theta)

    # not sure what the cutoff here should be (see plot_test_hessian)
    assert np.linalg.norm(hessian1-hessian2) < 1
Example #21
def test_score_2():
    ds = MullerPotential(random_state=0).get_cached().trajectories
    cluster = NDGrid(n_bins_per_feature=6,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])
    assignments = cluster.fit_transform(ds)
    test_indices = [5, 0, 4, 1, 2]
    train_indices = [3, 6, 7, 8, 9]

    model = ContinuousTimeMSM(lag_time=3, n_timescales=1)
    model.fit([assignments[i] for i in train_indices])
    test = model.score([assignments[i] for i in test_indices])
    train = model.score_
    print('train', train, 'test', test)
    assert 1 <= test < 2
    assert 1 <= train < 2
Example #22
def test_guess():
    ds = MullerPotential(random_state=0).get_cached().trajectories
    cluster = NDGrid(n_bins_per_feature=5,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])
    assignments = cluster.fit_transform(ds)
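    # fit with both initial-guess strategies; they should converge to
    # essentially the same optimum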

    model1 = ContinuousTimeMSM(guess='log')
    model1.fit(assignments)

    model2 = ContinuousTimeMSM(guess='pseudo')
    model2.fit(assignments)

    diff = model1.loglikelihoods_[-1] - model2.loglikelihoods_[-1]
    assert np.abs(diff) < 1e-3
    assert np.max(np.abs(model1.ratemat_ - model2.ratemat_)) < 1e-1
Example #23
def test_fit_2():
    grid = NDGrid(n_bins_per_feature=5, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(verbose=False, lag_time=10)
    model.fit(seqs)
    t1 = np.sort(model.timescales_)
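    # t2: the same timescales recomputed from the eigenvalues of transmat_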
    t2 = -1 / np.sort(np.log(np.linalg.eigvals(model.transmat_))[1:])

    model = MarkovStateModel(verbose=False, lag_time=10)
    model.fit(seqs)
    t3 = np.sort(model.timescales_)

    np.testing.assert_array_almost_equal(t1, t2)
    # timescales should be similar to the MSM's (within 50%)
    assert abs(t1[-1] - t3[-1]) / t1[-1] < 0.50
Example #24
def test_fit_2():
    grid = NDGrid(n_bins_per_feature=5, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(verbose=True, lag_time=10)
    model.fit(seqs)
    t1 = np.sort(model.timescales_)
    t2 = -1/np.sort(np.log(np.linalg.eigvals(model.transmat_))[1:])

    model = MarkovStateModel(verbose=False, lag_time=10)
    model.fit(seqs)
    t3 = np.sort(model.timescales_)

    np.testing.assert_array_almost_equal(t1, t2)
    # timescales should be similar to the MSM's (within 50%)
    assert abs(t1[-1] - t3[-1]) / t1[-1] < 0.50
Example #25
def test_score_2():
    from msmbuilder.example_datasets.muller import MULLER_PARAMETERS as PARAMS
    ds = MullerPotential(random_state=0).get()['trajectories']
    cluster = NDGrid(n_bins_per_feature=6,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])
    assignments = cluster.fit_transform(ds)
    test_indices = [5, 0, 4, 1, 2]
    train_indices = [3, 6, 7, 8, 9]

    model = ContinuousTimeMSM(lag_time=3, n_timescales=1)
    model.fit([assignments[i] for i in train_indices])
    test = model.score([assignments[i] for i in test_indices])
    train = model.score_
    print('train', train, 'test', test)
    assert 1 <= test < 2
    assert 1 <= train < 2
Example #26
def test_score_2():
    from msmbuilder.example_datasets.muller import MULLER_PARAMETERS as PARAMS
    ds = MullerPotential(random_state=0).get()['trajectories']
    cluster = NDGrid(n_bins_per_feature=6,
          min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
          max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])
    assignments = cluster.fit_transform(ds)
    test_indices = [5, 0, 4, 1, 2]
    train_indices = [3, 6, 7, 8, 9]

    model = ContinuousTimeMSM(lag_time=3, n_timescales=1)
    model.fit([assignments[i] for i in train_indices])
    test = model.score([assignments[i] for i in test_indices])
    train = model.score_
    print('train', train, 'test', test)
    assert 1 <= test < 2
    assert 1 <= train < 2
Example #27
def test_guess():
    from msmbuilder.example_datasets.muller import MULLER_PARAMETERS as PARAMS

    cluster = NDGrid(n_bins_per_feature=5,
          min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
          max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])

    ds = MullerPotential(random_state=0).get()['trajectories']
    assignments = cluster.fit_transform(ds)

    model1 = ContinuousTimeMSM(guess='log')
    model1.fit(assignments)

    model2 = ContinuousTimeMSM(guess='pseudo')
    model2.fit(assignments)

    assert np.abs(model1.loglikelihoods_[-1] - model2.loglikelihoods_[-1]) < 1e-3
    assert np.max(np.abs(model1.ratemat_ - model2.ratemat_)) < 1e-1
Example #28
def test_guess():
    from msmbuilder.example_datasets.muller import MULLER_PARAMETERS as PARAMS

    cluster = NDGrid(n_bins_per_feature=5,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])

    ds = MullerPotential(random_state=0).get()['trajectories']
    assignments = cluster.fit_transform(ds)

    model1 = ContinuousTimeMSM(guess='log')
    model1.fit(assignments)

    model2 = ContinuousTimeMSM(guess='pseudo')
    model2.fit(assignments)

    diff = model1.loglikelihoods_[-1] - model2.loglikelihoods_[-1]
    assert np.abs(diff) < 1e-3
    assert np.max(np.abs(model1.ratemat_ - model2.ratemat_)) < 1e-1
Example #29
def test_5():
    trjs = DoubleWell(random_state=0).get_cached().trajectories
    clusterer = NDGrid(n_bins_per_feature=5)
    mle_msm = MarkovStateModel(lag_time=100, verbose=False)
    b_msm = BayesianMarkovStateModel(lag_time=100,
                                     n_samples=1000,
                                     n_chains=8,
                                     n_steps=1000,
                                     random_state=0)

    states = clusterer.fit_transform(trjs)
    b_msm.fit(states)
    mle_msm.fit(states)

    # this is a pretty silly test. it checks that the mean transition
    # matrix is not so dissimilar from the MLE transition matrix.
    # This shouldn't necessarily be the case anyways -- the likelihood is
    # not "symmetric". And the cutoff chosen is just heuristic.
    assert np.linalg.norm(
        b_msm.all_transmats_.mean(axis=0) - mle_msm.transmat_) < 1e-2
Example #30
def test_score_3():
    ds = MullerPotential(random_state=0).get_cached().trajectories
    cluster = NDGrid(n_bins_per_feature=6,
                     min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
                     max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])

    assignments = cluster.fit_transform(ds)

    train_indices = [9, 4, 3, 6, 2]
    test_indices = [8, 0, 5, 7, 1]

    model = ContinuousTimeMSM(lag_time=3, n_timescales=1, sliding_window=False,
                              ergodic_cutoff=1)
    train_data = [assignments[i] for i in train_indices]
    test_data = [assignments[i] for i in test_indices]

    model.fit(train_data)
    train = model.score_
    test = model.score(test_data)
    print(train, test)
Example #31
def _plot_test_hessian():
    # plot the difference between the numerical hessian and the analytic
    # approximate hessian (opens Matplotlib window)
    n = 5
    grid = NDGrid(n_bins_per_feature=n, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])

    model = ContinuousTimeMSM(use_sparse=False).fit(seqs)
    theta = model.theta_
    C = model.countsmat_

    hessian1 = _ratematrix.hessian(theta, C, n)
    Hfun = nd.Jacobian(lambda x: _ratematrix.loglikelihood(x, C, n)[1])
    hessian2 = Hfun(theta)

    import sys
    import matplotlib.pyplot as pp
    pp.scatter(hessian1.flat, hessian2.flat, marker='x')
    pp.plot(pp.xlim(), pp.xlim(), 'k')
    print('Plotting...', file=sys.stderr)
    pp.show()
Example #32
def test_score_3():
    from msmbuilder.example_datasets.muller import MULLER_PARAMETERS as PARAMS

    cluster = NDGrid(n_bins_per_feature=6,
          min=[PARAMS['MIN_X'], PARAMS['MIN_Y']],
          max=[PARAMS['MAX_X'], PARAMS['MAX_Y']])

    ds = MullerPotential(random_state=0).get()['trajectories']
    assignments = cluster.fit_transform(ds)

    train_indices = [9, 4, 3, 6, 2]
    test_indices = [8, 0, 5, 7, 1]

    model = ContinuousTimeMSM(lag_time=3, n_timescales=1, sliding_window=False, ergodic_cutoff=1)
    train_data = [assignments[i] for i in train_indices]
    test_data = [assignments[i] for i in test_indices]

    model.fit(train_data)
    train = model.score_
    test = model.score(test_data)
    print(train, test)
Example #33
def get_dtrajs(X, xmin, xmax, m):
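    # discretize each trajectory onto a regular grid with m bins per feature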
    cluster = NDGrid(min=xmin, max=xmax, n_bins_per_feature=m)
    dtrajs = cluster.fit_transform(X)
    return dtrajs
Example #34
def test_5():
    grid = NDGrid(n_bins_per_feature=2)
    trajectories = QuadWell(random_state=0).get_cached().trajectories
    seqs = grid.fit_transform(trajectories)

    model2 = BayesianContinuousTimeMSM(n_samples=100).fit(seqs)
Example #36
def test_score_1():
    grid = NDGrid(n_bins_per_feature=5, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])
    model = ContinuousTimeMSM(verbose=False, lag_time=10, n_timescales=3).fit(seqs)
    np.testing.assert_approx_equal(model.score(seqs), model.score_)
Example #37
def test_score_1():
    grid = NDGrid(n_bins_per_feature=5, min=-np.pi, max=np.pi)
    seqs = grid.fit_transform(load_doublewell(random_state=0)['trajectories'])
    model = (ContinuousTimeMSM(verbose=False, lag_time=10,
                               n_timescales=3).fit(seqs))
    np.testing.assert_approx_equal(model.score(seqs), model.score_)