Example #1
def make_synthetic_data():
    mu_init = np.zeros(D)
    # mu_init[0] = 1.0
    sigma_init = 0.5 * np.eye(D)

    A = np.eye(D)
    # A[:2,:2] = \
    #     0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
    #                    [np.sin(np.pi/24),  np.cos(np.pi/24)]])
    sigma_states = 0.1 * np.eye(D)

    # C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
    C = 0. * np.random.randn(K - 1, D)

    truemodel = MultinomialLDS(K,
                               D,
                               init_dynamics_distn=Gaussian(mu=mu_init,
                                                            sigma=sigma_init),
                               dynamics_distn=AutoRegression(
                                   A=A, sigma=sigma_states),
                               C=C)

    data_list = []
    Xs = []
    for i in range(Ndata):
        data = truemodel.generate(T=T, N=N)
        data_list.append(data)
        Xs.append(data["x"])
    return data_list, Xs
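The commented-out lines above show a variant with rotational latent dynamics. A minimal numpy sketch (standalone; D = 4 is a hypothetical latent dimension) of why that variant is stable: the rotated block's eigenvalues have modulus 0.99, so latent trajectories decay slowly instead of blowing up.

import numpy as np

D = 4  # hypothetical latent dimension
theta = np.pi / 24
A = np.eye(D)
A[:2, :2] = 0.99 * np.array([[np.cos(theta), -np.sin(theta)],
                             [np.sin(theta),  np.cos(theta)]])

# The rotated block contributes eigenvalues 0.99*exp(+/- i*theta);
# the untouched identity block contributes eigenvalues of exactly 1.
print(np.sort(np.abs(np.linalg.eigvals(A))))  # -> [0.99, 0.99, 1., 1.]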
Example #2
def sample_slds_model():
    mu_init = np.zeros(D_latent)
    mu_init[0] = 2.0
    sigma_init = 0.01 * np.eye(D_latent)

    def random_rotation(n, theta):
        rot = 0.99 * np.array([[np.cos(theta), -np.sin(theta)],
                               [np.sin(theta), np.cos(theta)]])
        out = np.zeros((n, n))
        out[:2, :2] = rot
        q = np.linalg.qr(np.random.randn(n, n))[0]
        return q.dot(out).dot(q.T)

    def random_dynamics(n):
        A = np.random.randn(n,n)
        A = A.dot(A.T)
        U,S,V = np.linalg.svd(A)
        A_stable = U.dot(np.diag(S/(1.1*np.max(S)))).dot(V.T)
        # A_stable = U.dot(0.99 * np.eye(n)).dot(V.T)
        return A_stable

    ths = np.linspace(0, np.pi/8., K)
    As = [random_rotation(D_latent, ths[k]) for k in range(K)]
    # As = [random_dynamics(D_latent) for k in range(K)]
    bs = [np.zeros((D_latent, 1))] + [.25 * np.random.randn(D_latent, 1) for k in range(K-1)]

    C = np.random.randn(D_obs, D_latent)
    d = np.zeros((D_obs, 1))
    sigma_obs = 0.5 * np.ones(D_obs)

    ###################
    #  generate data  #
    ###################
    init_dynamics_distns = [Gaussian(mu=mu_init, sigma=sigma_init) for _ in range(K)]
    dynamics_distns = [Regression(A=np.hstack((A, b)), sigma=0.01 * np.eye(D_latent)) for A,b in zip(As, bs)]
    emission_distns = DiagonalRegression(D_obs, D_latent+1, A=np.hstack((C, d)), sigmasq=sigma_obs)

    slds = HMMSLDS(
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns,
        init_dynamics_distns=init_dynamics_distns,
        alpha=3., init_state_distn='uniform')

    #### MANUALLY CREATE DATA
    P = np.ones((K,K)) + 1 * np.eye(K)
    P = P / np.sum(P,1,keepdims=True)
    z = np.zeros(T//D, dtype=np.int32)
    for t in range(1,T//D):
        z[t] = np.random.choice(np.arange(K), p=P[z[t-1]])
    z = np.repeat(z, D)

    statesobj = slds._states_class(model=slds, T=z.size, stateseq=z, inputs=np.ones((z.size, 1)))
    statesobj.generate_gaussian_states()
    y = statesobj.data = statesobj.generate_obs()
    x = statesobj.gaussian_states
    slds.states_list.append(statesobj)

    return z,x,y,slds
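random_rotation conjugates a scaled 2-D rotation by a random orthogonal matrix, so the resulting dynamics matrix keeps two eigenvalues of modulus 0.99 and annihilates the remaining directions. A quick numpy check of that property, under the same construction:

import numpy as np

def random_rotation(n, theta):
    rot = 0.99 * np.array([[np.cos(theta), -np.sin(theta)],
                           [np.sin(theta),  np.cos(theta)]])
    out = np.zeros((n, n))
    out[:2, :2] = rot
    q = np.linalg.qr(np.random.randn(n, n))[0]  # random orthogonal basis
    return q.dot(out).dot(q.T)

A = random_rotation(4, np.pi / 8)
# Orthogonal conjugation preserves eigenvalues: two at modulus 0.99, rest at 0.
print(np.sort(np.abs(np.linalg.eigvals(A))))  # -> [0., 0., 0.99, 0.99]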
Example #3
def test_hmm_likelihood_perf(T=10000, K=50, D=20):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pybasicbayes.distributions import Gaussian
    oldhmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")

    states = oldhmm.add_data(y)
    tic = time()
    true_lkhd = states.log_likelihood()
    pyhsmm_dt = time() - tic
    print("PyHSMM: ", pyhsmm_dt, "sec. Val: ", true_lkhd)

    # Make an HMM with these parameters
    hmm = ssm.HMM(K, D, observations="gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations._sqrt_Sigmas = np.sqrt(sigma) * np.array(
        [np.eye(D) for k in range(K)])

    tic = time()
    test_lkhd = hmm.log_probability(y)
    ssm_dt = time() - tic
    print("SSM HMM: ", ssm_dt, "sec. Val: ", test_lkhd)

    # Make an ARHMM with these parameters
    arhmm = ssm.HMM(K, D, observations="ar")
    tic = time()
    arhmm.log_probability(y)
    arhmm_dt = time() - tic
    print("SSM ARHMM: ", arhmm_dt, "sec.")

    # Time the expected-states computation with a fresh ARHMM
    arhmm = ssm.HMM(K, D, observations="ar")
    tic = time()
    arhmm.expected_states(y)
    arhmm_dt = time() - tic
    print("SSM ARHMM Expectations: ", arhmm_dt, "sec.")
Example #4
    def __init__(self, data, T, alpha_beta):
        mu, sigma = np.zeros(T), np.eye(T)
        self.theta_prior = \
            Gaussian(
                mu=mu, sigma=sigma, mu_0=mu, sigma_0=T*sigma/10.,
                nu_0=T/10., kappa_0=10.)

        self.ppgs = initialize_polya_gamma_samplers()
        self.omega = np.zeros((data.shape[0], T))

        super(LogisticNormalCorrelatedLDA, self).__init__(data, T, alpha_beta)
Example #5
    def __init__(self, data, T, alpha_beta):
        mu, sigma = compute_uniform_mean_psi(T)
        self.theta_prior = Gaussian(mu=mu,
                                    sigma=sigma,
                                    mu_0=mu,
                                    sigma_0=T * sigma / 10.,
                                    nu_0=T / 10.,
                                    kappa_0=1. / 10)

        self.ppgs = initialize_polya_gamma_samplers()
        self.omega = np.zeros((data.shape[0], T - 1))

        super(StickbreakingCorrelatedLDA, self).__init__(data, T, alpha_beta)
Example #6
    def __init__(self,
                 N,
                 B,
                 mu_0=0.0,
                 sigma_0=1.0,
                 kappa_0=1.0,
                 nu_0=3.0,
                 is_diagonal_weight_special=True,
                 **kwargs):
        super(_IndependentGaussianMixin, self).__init__(N, B)

        mu_0 = expand_scalar(mu_0, (B, ))
        sigma_0 = expand_cov(sigma_0, (B, B))
        self._gaussian = Gaussian(mu_0=mu_0,
                                  sigma_0=sigma_0,
                                  kappa_0=kappa_0,
                                  nu_0=max(nu_0, B + 2.))

        self.is_diagonal_weight_special = is_diagonal_weight_special
        if is_diagonal_weight_special:
            self._self_gaussian = \
                Gaussian(mu_0=mu_0, sigma_0=sigma_0, kappa_0=kappa_0, nu_0=nu_0)
Example #7
def fit_lds_model(Xs, Xtest, N_samples=100):
    model = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu_0=np.zeros(D),
                                                        sigma_0=np.eye(D),
                                                        kappa_0=1.0,
                                                        nu_0=D + 1.0),
                           dynamics_distn=AutoRegression(nu_0=D + 1,
                                                         S_0=np.eye(D),
                                                         M_0=np.zeros((D, D)),
                                                         K_0=np.eye(D)),
                           sigma_C=1)

    for X in Xs:
        model.add_data(X)
    data = model.data_list[0]

    samples = []
    lls = []
    test_lls = []
    mc_test_lls = []
    pis = []
    psis = []
    zs = []
    timestamps = [time.time()]
    for smpl in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())

        samples.append(model.copy_sample())
        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())
        # test_lls.append(model.heldout_log_likelihood(Xtest, M=50)[0])
        mc_test_lls.append(model._mc_heldout_log_likelihood(Xtest, M=1)[0])
        pis.append(model.pi(data))
        psis.append(model.psi(data))
        zs.append(data["states"].stateseq)

    lls = np.array(lls)
    test_lls = np.array(test_lls)
    pis = np.array(pis)
    psis = np.array(psis)
    zs = np.array(zs)
    timestamps = np.array(timestamps)
    timestamps -= timestamps[0]
    return model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps
Example #8
def test_expectations(T=1000, K=20, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    oldhmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    oldhmm.add_data(y)
    states = oldhmm.states_list.pop()
    states.E_step()
    true_Ez = states.expected_states
    true_E_trans = states.expected_transcounts

    # Make an HMM with these parameters
    hmm = HMM(K, D, observations="diagonal_gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.sigmasq = sigma * np.ones((K, D))
    test_Ez, test_Ezzp1, _ = hmm.expected_states(y)
    test_E_trans = test_Ezzp1.sum(0)

    print(true_E_trans.round(3))
    print(test_E_trans.round(3))

    assert np.allclose(true_Ez, test_Ez)
    assert np.allclose(true_E_trans, test_E_trans)
Example #9
def DefaultPoissonLDS(
    D_obs,
    D_latent,
    D_input=0,
    mu_init=None,
    sigma_init=None,
    A=None,
    B=None,
    sigma_states=None,
    C=None,
    d=None,
):
    assert D_input == 0, "Inputs are not yet supported for Poisson LDS"
    model = LaplaceApproxPoissonLDS(
        init_dynamics_distn=Gaussian(mu_0=np.zeros(D_latent),
                                     sigma_0=np.eye(D_latent),
                                     kappa_0=1.0,
                                     nu_0=D_latent + 1),
        dynamics_distn=Regression(A=0.9 * np.eye(D_latent),
                                  sigma=np.eye(D_latent),
                                  nu_0=D_latent + 1,
                                  S_0=D_latent * np.eye(D_latent),
                                  M_0=np.zeros((D_latent, D_latent)),
                                  K_0=D_latent * np.eye(D_latent)),
        emission_distn=PoissonRegression(D_obs, D_latent, verbose=False))

    set_default = \
        lambda prm, val, default: \
            model.__setattr__(prm, val if val is not None else default)

    set_default("mu_init", mu_init, np.zeros(D_latent))
    set_default("sigma_init", sigma_init, np.eye(D_latent))

    set_default("A", A, 0.99 * random_rotation(D_latent))
    set_default("B", B, 0.1 * np.random.randn(D_latent, D_input))
    set_default("sigma_states", sigma_states, 0.1 * np.eye(D_latent))

    set_default("C", C, np.random.randn(D_obs, D_latent))
    set_default("d", d, np.zeros((D_obs, 1)))

    return model
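The set_default lambda above writes a caller-supplied value, or a fallback, onto the model. A plain-function equivalent of the same idiom (a sketch; setattr is the idiomatic spelling of model.__setattr__):

def set_default(model, prm, val, default):
    # Use the caller-supplied value when given, otherwise the default
    setattr(model, prm, val if val is not None else default)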
Example #10
def fit_arhmm(x, affine=True):
    print("Fitting Sticky ARHMM")
    dynamics_hypparams = \
        dict(nu_0=D_latent + 2,
             S_0=np.eye(D_latent),
             M_0=np.hstack((np.eye(D_latent), np.zeros((D_latent, int(affine))))),
             K_0=np.eye(D_latent + affine),
             affine=affine)
    dynamics_hypparams = get_empirical_ar_params([x], dynamics_hypparams)

    dynamics_distns = [
        AutoRegression(
            A=np.column_stack((0.99 * np.eye(D_latent),
                               np.zeros((D_latent, int(affine))))),
            sigma=np.eye(D_latent),
            **dynamics_hypparams)
        for _ in range(args.K)]

    init_distn = Gaussian(nu_0=D_latent + 2,
                          sigma_0=np.eye(D_latent),
                          mu_0=np.zeros(D_latent),
                          kappa_0=1.0)

    arhmm = ARWeakLimitStickyHDPHMM(
        init_state_distn='uniform',
        init_emission_distn=init_distn,
        obs_distns=dynamics_distns,
        alpha=3.0, kappa=10.0, gamma=3.0)

    arhmm.add_data(x)

    lps = []
    for _ in tqdm(range(args.N_samples)):
        arhmm.resample_model()
        lps.append(arhmm.log_likelihood())

    z_init = arhmm.states_list[0].stateseq
    z_init = np.concatenate(([0], z_init))

    return arhmm, z_init
Example #11
def make_rslds_parameters(C_init):
    init_dynamics_distns = [
        Gaussian(
            mu=np.zeros(D_latent),
            sigma=3 * np.eye(D_latent),
            nu_0=D_latent + 2,
            sigma_0=3. * np.eye(D_latent),
            mu_0=np.zeros(D_latent),
            kappa_0=1.0,
        ) for _ in range(K)
    ]

    ths = np.random.uniform(np.pi / 30., 1.0, size=K)
    As = [random_rotation(D_latent, th) for th in ths]
    As = [np.hstack((A, np.ones((D_latent, 1)))) for A in As]
    dynamics_distns = [
        Regression(
            A=As[k],
            sigma=np.eye(D_latent),
            nu_0=D_latent + 1000,
            S_0=np.eye(D_latent),
            M_0=np.hstack((np.eye(D_latent), np.zeros((D_latent, 1)))),
            K_0=np.eye(D_latent + 1),
        ) for k in range(K)
    ]

    if C_init is not None:
        emission_distns = \
            DiagonalRegression(D_obs, D_latent + 1,
                               A=C_init.copy(), sigmasq=np.ones(D_obs),
                               alpha_0=2.0, beta_0=2.0)
    else:
        emission_distns = \
            DiagonalRegression(D_obs, D_latent + 1,
                               alpha_0=2.0, beta_0=2.0)

    return init_dynamics_distns, dynamics_distns, emission_distns
Example #12
    def __init__(self,
                 N,
                 B=1,
                 dim=2,
                 b=0.5,
                 sigma=None,
                 Sigma_0=None,
                 nu_0=None,
                 mu_self=0.0,
                 eta=0.01):
        """
        Initialize SBM with parameters defined above.
        """
        super(LatentDistanceGaussianWeightDistribution, self).__init__(N)
        self.B = B
        self.dim = dim

        self.b = b
        self.eta = eta
        self.L = np.sqrt(eta) * np.random.randn(N, dim)

        if Sigma_0 is None:
            Sigma_0 = np.eye(B)

        if nu_0 is None:
            nu_0 = B + 2

        self.cov = GaussianFixedMean(mu=np.zeros(B),
                                     sigma=sigma,
                                     lmbda_0=Sigma_0,
                                     nu_0=nu_0)

        # Special case self-weights (along the diagonal)
        self._self_gaussian = Gaussian(mu_0=mu_self * np.ones(B),
                                       sigma_0=Sigma_0,
                                       nu_0=nu_0,
                                       kappa_0=1.0)
Example #13
def test_viterbi(T=1000, K=20, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    oldhmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    oldhmm.add_data(y)
    states = oldhmm.states_list.pop()
    states.Viterbi()
    z_star = states.stateseq

    # Make an HMM with these parameters
    hmm = ssm.HMM(K, D, observations="diagonal_gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.sigmasq = sigma * np.ones((K, D))
    z_star2 = hmm.most_likely_states(y)

    assert np.allclose(z_star, z_star2)
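For reference, most_likely_states computes the Viterbi path that the test compares against pyhsmm. A minimal pure-numpy sketch of the recursion (an illustration, not ssm's or pyhsmm's actual implementation), taking initial log probabilities, a log transition matrix, and per-timestep log-likelihoods:

import numpy as np

def viterbi(log_pi0, log_P, log_likes):
    # log_pi0: (K,), log_P: (K, K), log_likes: (T, K)
    T, K = log_likes.shape
    scores = np.zeros((T, K))           # best log prob of a path ending in state k at time t
    args = np.zeros((T, K), dtype=int)  # argmax predecessors
    scores[0] = log_pi0 + log_likes[0]
    for t in range(1, T):
        vals = scores[t - 1][:, None] + log_P   # (K, K): end in i, step to j
        args[t] = np.argmax(vals, axis=0)
        scores[t] = vals[args[t], np.arange(K)] + log_likes[t]
    # Backtrace to recover the most likely state sequence
    z = np.zeros(T, dtype=int)
    z[-1] = np.argmax(scores[-1])
    for t in range(T - 2, -1, -1):
        z[t] = args[t + 1, z[t + 1]]
    return z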
Example #14
A[:2,:2] = \
    0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
                   [np.sin(np.pi/24),  np.cos(np.pi/24)]])
sigma_states = 0.1 * np.eye(D)

K = 3
# C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
C = np.random.randn(K - 1, D)

###################
#  generate data  #
###################

model = MultinomialLDS(K,
                       D,
                       init_dynamics_distn=Gaussian(mu=mu_init,
                                                    sigma=sigma_init),
                       dynamics_distn=AutoRegression(A=A, sigma=sigma_states),
                       C=C)
data = model.generate(T=T, N=N, keep=False)
# data["x"] = np.hstack([np.zeros((T,K-1)), np.ones((T,1))])

# Estimate the held out likelihood using Monte Carlo
M = 10000
hll_mc, std_mc = model._mc_heldout_log_likelihood(data["x"], M=M)

# Estimate the held out log likelihood
# hll_info, std_info = model._info_form_heldout_log_likelihood(data["x"], M=M)
hll_dist, std_dist = model._distn_form_heldout_log_likelihood(data["x"], M=M)

print("MC. Model: ", hll_mc, " +- ", std_mc)
# print "AN. Model (info): ", hll_info, " +- ", std_info
Example #15
import numpy as np
import sys
from pybasicbayes.distributions import Gaussian
from gen_synthetic import generate_data

if __name__ == '__main__':
    prefix = 'cy'
    K = 5
    A = np.array([[.00, .99, .00, .00, .01],
                  [.01, .00, .99, .00, .00],
                  [.00, .01, .00, .99, .00],
                  [.00, .00, .01, .00, .99],
                  [.99, .00, .00, .01, .00]])
    means = [(0, 0), (10, 10), (10, 10), (0, 0), (10, 10)]
    means = [Gaussian(mu=np.array(m), sigma=np.eye(2)) for m in means]
    means = np.array(means)
    obs, sts, _ = generate_data(A, means, 10000)
    np.savetxt('data/%s_data.txt' % prefix, obs)
    np.savetxt('data/%s_sts.txt' % prefix, sts)
Example #16
D_in = 2
D_obs = 2
Nmax = 2

tgrid = np.linspace(-5 * np.pi, 5 * np.pi, T)
covariate_seq = np.column_stack((np.sin(tgrid), np.cos(tgrid)))
covariate_seq += 0.001 * np.random.randn(T, D_in)

obs_hypparams = {
    'mu_0': np.zeros(D_obs),
    'sigma_0': np.eye(D_obs),
    'kappa_0': 1.0,
    'nu_0': D_obs + 2
}
true_model = \
    PGInputHMM(obs_distns=[Gaussian(**obs_hypparams) for _ in range(Nmax)],
               init_state_concentration=1.0,
               D_in=D_in,
               trans_params=dict(sigmasq_A=4.0, sigmasq_b=0.001))

# Set the weights by hand such that they primarily
# depend on the input
true_model.trans_distn.A[0][Nmax:] = [5., 5.]
# true_model.trans_distn.A[1][Nmax:] = [-2.,  2.]
# true_model.trans_distn.A[2][Nmax:] = [-2., -2.]

# generate fake data and plot
dataset = [
    true_model.generate(T, covariates=covariate_seq[:, :D_in])
    for _ in range(5)
]
Example #17
def trainModel(fileName, unit_trial, K=8, xDim=5):
    # unit_trial -- training data
    # randomization
    np.random.seed()

    numTrial, numUnit, numTime = unit_trial.shape

    # factor analysis for initialization
    factor_unit_trial = unit_trial.transpose([0, 2, 1])
    factor_unit_trial = factor_unit_trial.reshape([-1, factor_unit_trial.shape[2]])
    yDim = numUnit
    inputDim = 1  # dimension of the constant bias input
    inputs = np.ones((numTime, inputDim))
    estimator = factan(n_components=xDim, tol=0.00001, copy=True,
                       max_iter=10000, noise_variance_init=None,
                       svd_method='randomized', iterated_power=3,
                       random_state=None)
    estimator.fit(factor_unit_trial)
    C_init = estimator.components_.T
    D_init = estimator.mean_.reshape([-1, 1])

    # SLDS fit
    init_dynamics_distns = [Gaussian(nu_0=xDim+3,
                                     sigma_0=3.*np.eye(xDim),
                                     mu_0=np.zeros(xDim),
                                     kappa_0=0.01)
                            for _ in range(K)]

    dynamics_distns = [Regression(nu_0=xDim + 1,
                                  S_0=xDim * np.eye(xDim),
                                  M_0=np.hstack((.99 * np.eye(xDim), np.zeros((xDim, inputDim)))),
                                  K_0=xDim * np.eye(xDim + inputDim))
                       for _ in range(K)]

    As = [np.eye(xDim) for _ in range(K)]
    if inputDim > 0:
        As = [np.hstack((A, np.zeros((xDim, inputDim))))
              for A in As]
    for dd, A in zip(dynamics_distns, As):
        dd.A = A

    sigma_statess = [np.eye(xDim) for _ in range(K)]
    for dd, sigma in zip(dynamics_distns, sigma_statess):
        dd.sigma = sigma

    emission_distns = [DiagonalRegression(yDim, xDim + inputDim,
                                          mu_0=None, Sigma_0=None,
                                          alpha_0=3.0, beta_0=2.0,
                                          A=np.hstack((C_init.copy(), D_init.copy())),
                                          sigmasq=None, niter=1)
                       for _ in range(K)]

    train_model = HMMSLDS(
        init_dynamics_distns=init_dynamics_distns,
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns,
        alpha=3., init_state_distn='uniform')

    # Adding training data
    for trial in range(numTrial):
        train_model.add_data(unit_trial[trial].T, inputs=inputs)

    print("Initializing with Gibbs")
    N_gibbs_samples = 2000
    def initialize(model):
        model.resample_model()
        return model.log_likelihood()

    gibbs_lls = [initialize(train_model) for _ in progprint_xrange(N_gibbs_samples)]

    print("Fitting with VBEM")
    N_vbem_iters = 100
    def update(model):
        model.VBEM_step()
        return model.log_likelihood()

    train_model._init_mf_from_gibbs()
    vbem_lls = [update(train_model) for _ in progprint_xrange(N_vbem_iters)]

    np.save(fileName + '_gibbs_lls', gibbs_lls)
    np.save(fileName + '_vbem_lls', vbem_lls)
    np.save(fileName + '_train_model', train_model)

    return train_model
Example #18
                      [np.sin(np.pi / 24),
                       np.cos(np.pi / 24)]])
sigma_states = 0.0001 * np.eye(D)

K = 4
# C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
C = np.random.randn(K - 1, D)
sigma_obs = 0.01 * np.eye(K)

###################
#  generate data  #
###################

truemodel = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu=mu_init,
                                                        sigma=sigma_init),
                           dynamics_distn=AutoRegression(A=A,
                                                         sigma=sigma_states),
                           C=C)

data = truemodel.generate(T=T)

###################
#    inference    #
###################
testmodel = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu_0=mu_init,
                                                        sigma_0=sigma_init,
                                                        kappa_0=1.0,
                                                        nu_0=3.0),
Example #19
def simulate_nascar():
    assert K_true == 4
    As = [
        random_rotation(D_latent, np.pi / 24.),
        random_rotation(D_latent, np.pi / 48.)
    ]

    # Set the center points for each system
    centers = [np.array([+2.0, 0.]), np.array([-2.0, 0.])]
    bs = [
        -(A - np.eye(D_latent)).dot(center) for A, center in zip(As, centers)
    ]

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([+0.1, 0.]))

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([-0.25, 0.]))

    # Construct multinomial regression to divvy up the space
    w1, b1 = np.array([+1.0, 0.0]), np.array([-2.0])  # x + b > 0 -> x > -b
    w2, b2 = np.array([-1.0, 0.0]), np.array([-2.0])  # -x + b > 0 -> x < b
    w3, b3 = np.array([0.0, +1.0]), np.array([0.0])  # y > 0
    w4, b4 = np.array([0.0, -1.0]), np.array([0.0])  # y < 0

    reg_W = np.column_stack((100 * w1, 100 * w2, 10 * w3, 10 * w4))
    reg_b = np.concatenate((100 * b1, 100 * b2, 10 * b3, 10 * b4))

    # Make a recurrent SLDS with these params #
    dynamics_distns = [
        Regression(
            A=np.column_stack((A, b)),
            sigma=1e-4 * np.eye(D_latent),
            nu_0=D_latent + 2,
            S_0=1e-4 * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent + 1)),
            K_0=np.eye(D_latent + 1),
        ) for A, b in zip(As, bs)
    ]

    init_dynamics_distns = [
        Gaussian(mu=np.array([0.0, 1.0]), sigma=1e-3 * np.eye(D_latent))
        for _ in range(K)
    ]

    C = np.hstack((npr.randn(D_obs, D_latent), np.zeros((D_obs, 1))))
    emission_distns = \
        DiagonalRegression(D_obs, D_latent+1,
                           A=C, sigmasq=1e-5 *np.ones(D_obs),
                           alpha_0=2.0, beta_0=2.0)

    model = SoftmaxRecurrentOnlySLDS(trans_params=dict(W=reg_W, b=reg_b),
                                     init_state_distn='uniform',
                                     init_dynamics_distns=init_dynamics_distns,
                                     dynamics_distns=dynamics_distns,
                                     emission_distns=emission_distns,
                                     alpha=3.)

    #########################
    # Sample from the model #
    #########################
    inputs = np.ones((T, 1))
    y, x, z = model.generate(T=T, inputs=inputs)

    # Mask off some data
    if mask_start == mask_stop:
        mask = None
    else:
        mask = np.ones((T, D_obs), dtype=bool)
        mask[mask_start:mask_stop] = False

    # Print the true parameters
    np.set_printoptions(precision=2)
    print("True W:\n{}".format(model.trans_distn.W))
    print("True logpi:\n{}".format(model.trans_distn.logpi))

    return model, inputs, z, x, y, mask
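The offsets bs in simulate_nascar are chosen so that each rotational regime orbits a prescribed center: with b = -(A - I)c, the point c is the fixed point of x' = Ax + b. A quick numpy verification for one regime:

import numpy as np

theta = np.pi / 24.
A = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
center = np.array([+2.0, 0.])
b = -(A - np.eye(2)).dot(center)

# The fixed point solves (I - A) x = b, which recovers the center exactly.
x_star = np.linalg.solve(np.eye(2) - A, b)
assert np.allclose(x_star, center)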
Example #20
    out[:2, :2] = rot
    q = np.linalg.qr(np.random.randn(n, n))[0]
    return q.dot(out).dot(q.T)


As = [
    random_rotation(D_latent, np.pi / 24.),
    random_rotation(D_latent, np.pi / 8.)
]

# Start with a random emission matrix
C = np.random.randn(D_obs, D_latent)
b = -2.0 * np.ones((D_obs, 1))

init_dynamics_distns = [
    Gaussian(mu=mu_init, sigma=sigma_init) for _ in range(K)
]
dynamics_distns = [Regression(A=A, sigma=0.01 * np.eye(D_latent)) for A in As]
emission_distns = BernoulliRegression(D_obs, D_latent, A=C, b=b)

truemodel = HMMCountSLDS(dynamics_distns=dynamics_distns,
                         emission_distns=emission_distns,
                         init_dynamics_distns=init_dynamics_distns,
                         alpha=3.,
                         init_state_distn='uniform')

#%%
### Generate data from an SLDS
# Manually create the states object with the mask
T = 1000
stateseq = np.repeat(np.arange(T // 100) % 2, 100).astype(np.int32)
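The stateseq built here alternates the two discrete states in blocks of 100 steps, giving the SLDS a known segmentation to generate from. A tiny check of the pattern:

import numpy as np

T = 1000
stateseq = np.repeat(np.arange(T // 100) % 2, 100).astype(np.int32)
print(stateseq[::100])  # -> [0 1 0 1 0 1 0 1 0 1]
assert stateseq.size == T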
Example #21
def _default_model(model_class,
                   K,
                   D_obs,
                   D_latent,
                   D_input=0,
                   mu_inits=None,
                   sigma_inits=None,
                   As=None,
                   Bs=None,
                   sigma_statess=None,
                   Cs=None,
                   Ds=None,
                   sigma_obss=None,
                   alpha=3.0,
                   init_state_distn='uniform',
                   **kwargs):

    # Initialize init_dynamics_distns
    init_dynamics_distns = \
        [Gaussian(nu_0=D_latent+3,
                  sigma_0=3.*np.eye(D_latent),
                  mu_0=np.zeros(D_latent),
                  kappa_0=0.01)
         for _ in range(K)]

    if mu_inits is not None:
        assert isinstance(mu_inits, list) and len(mu_inits) == K
        for id, mu in zip(init_dynamics_distns, mu_inits):
            id.mu = mu

    if sigma_inits is not None:
        assert isinstance(sigma_inits, list) and len(sigma_inits) == K
        for id, sigma in zip(init_dynamics_distns, sigma_inits):
            id.sigma = sigma

    # Initialize dynamics distributions
    dynamics_distns = [
        Regression(nu_0=D_latent + 1,
                   S_0=D_latent * np.eye(D_latent),
                   M_0=np.hstack(
                       (.99 * np.eye(D_latent), np.zeros(
                           (D_latent, D_input)))),
                   K_0=D_latent * np.eye(D_latent + D_input)) for _ in range(K)
    ]
    if As is not None:
        assert isinstance(As, list) and len(As) == K
        if D_input > 0:
            assert isinstance(Bs, list) and len(Bs) == K
            As = [np.hstack((A, B)) for A, B in zip(As, Bs)]
    else:
        # As = [random_rotation(D_latent) for _ in range(K)]
        As = [np.eye(D_latent) for _ in range(K)]
        if D_input > 0:
            As = [np.hstack((A, np.zeros((D_latent, D_input)))) for A in As]
    for dd, A in zip(dynamics_distns, As):
        dd.A = A

    if sigma_statess is not None:
        assert isinstance(sigma_statess, list) and len(sigma_statess) == K
    else:
        sigma_statess = [np.eye(D_latent) for _ in range(K)]

    for dd, sigma in zip(dynamics_distns, sigma_statess):
        dd.sigma = sigma

    # Initialize emission distributions
    _single_emission = (Cs is not None) and (not isinstance(Cs, list))

    if _single_emission:
        if D_input > 0:
            assert Ds is not None and not isinstance(Ds, list)
            Cs = np.hstack((Cs, Ds))

        if sigma_obss is None:
            sigma_obss = np.eye(D_obs)

        emission_distns = Regression(nu_0=D_obs + 3,
                                     S_0=D_obs * np.eye(D_obs),
                                     M_0=np.zeros((D_obs, D_latent + D_input)),
                                     K_0=D_obs * np.eye(D_latent + D_input),
                                     A=Cs,
                                     sigma=sigma_obss)

    else:
        emission_distns = [
            Regression(nu_0=D_obs + 1,
                       S_0=D_obs * np.eye(D_obs),
                       M_0=np.zeros((D_obs, D_latent + D_input)),
                       K_0=D_obs * np.eye(D_latent + D_input))
            for _ in range(K)
        ]

        if Cs is not None and sigma_obss is not None:
            assert isinstance(Cs, list) and len(Cs) == K
            assert isinstance(sigma_obss, list) and len(sigma_obss) == K
            if D_input > 0:
                assert isinstance(Ds, list) and len(Ds) == K
                Cs = [np.hstack((C, D)) for C, D in zip(Cs, Ds)]
        else:
            Cs = [np.zeros((D_obs, D_latent + D_input)) for _ in range(K)]
            sigma_obss = [0.05 * np.eye(D_obs) for _ in range(K)]

        for ed, C, sigma in zip(emission_distns, Cs, sigma_obss):
            ed.A = C
            ed.sigma = sigma

    model = model_class(init_dynamics_distns=init_dynamics_distns,
                        dynamics_distns=dynamics_distns,
                        emission_distns=emission_distns,
                        init_state_distn=init_state_distn,
                        alpha=alpha,
                        **kwargs)

    return model
Example #22
File: vbem.py Project: zqwei/pyslds
data = data[truemodel.nlags:]

plt.figure()
plt.plot(data[:, 0], data[:, 1], 'x-')

#################
#  build model  #
#################
Cs = [np.eye(D_obs) for _ in range(Kmax)]  # Shared emission matrices
sigma_obss = [0.05 * np.eye(D_obs)
              for _ in range(Kmax)]  # Emission noise covariances

model = HMMSLDS(init_dynamics_distns=[
    Gaussian(nu_0=5,
             sigma_0=3. * np.eye(D_latent),
             mu_0=np.zeros(D_latent),
             kappa_0=0.01,
             mu=np.zeros(D_latent),
             sigma=np.eye(D_latent)) for _ in range(Kmax)
],
                dynamics_distns=[
                    Regression(
                        A=np.hstack(
                            (np.eye(D_latent), np.zeros((D_latent, D_input)))),
                        sigma=np.eye(D_latent),
                        nu_0=D_latent + 3,
                        S_0=D_latent * np.eye(D_latent),
                        M_0=np.hstack(
                            (np.eye(D_latent), np.zeros((D_latent, D_input)))),
                        K_0=D_latent * np.eye(D_latent + D_input),
                    ) for _ in range(Kmax)
                ],
Example #23
    ax.set_ylabel("$x_{t,2}$")
    ax.set_title("Superposition")

    return ax




if __name__ == "__main__":
    tree = balanced_binary_tree(4)

    # Make rSLDS parameters
    init_dynamics_distns = [
        Gaussian(
            mu=np.zeros(D_latent),
            sigma=np.eye(D_latent),
            nu_0=D_latent + 2, sigma_0=3. * np.eye(D_latent),
            mu_0=np.zeros(D_latent), kappa_0=1.0,
        )
        for _ in range(K)]

    # Create hierarchical dynamics model
    hierarchical_dynamics_distn = \
        TreeStructuredHierarchicalDynamics(tree, 0.1 * np.eye(D_latent), 3 * np.eye(D_latent + 1))

    emission_distns = \
        DiagonalRegression(D_obs, D_latent + 1,
                           alpha_0=2.0, beta_0=2.0)

    model = TreeStructuredPGRecurrentSLDS(
        trans_params=dict(sigmasq_A=10., sigmasq_b=0.01),
        init_state_distn='uniform',
Example #24
def main(name, datadir, datafn, K, expdir=None, nfolds=1, nrestarts=1, seed=None):
    """ Run experiment on 4 state, two group synthetic data.

        name : Name of experiment.

        datadir : Path to directory containing data.

        datafn : Prefix name to files that data and missing masks are stored
                 in.

        K : Number of components in HMM.

        expdir : Path to directory to store experiment results.  If None
                 (default), then a directory, `name`_results, is made in the
                 current directory.

        nfolds : Number of folds to generate if datafn is None.

        nrestarts : Number of random initial parameters.

        seed : Random number seed.
    """

    # Set seed for reproducibility
    np.random.seed(seed)

    # Generate/Load data and folds (missing masks)
    # These are the emission distributions for the following tests
    if not os.path.exists(datadir):
        raise RuntimeError("Could not find datadir: %s" % (datadir,))
    else:
        if not os.path.isdir(datadir):
            raise RuntimeError("datadir: %s exists but is not a directory" % (datadir,))

    if datafn is None:
        datafn = name

    dpath = os.path.join(datadir, datafn + "_data.txt")
    mpath = os.path.join(datadir, datafn + "_fold*.txt")
    try:
        X = np.loadtxt(dpath)
    except IOError:
        # Without this, X would be left undefined and fail later with a NameError
        raise RuntimeError("Could not load data: %s" % (dpath,))

    masks = glob.glob(mpath)
    if len(masks) == 0:
        masks = [None]

    # Initialize parameter possibilities

    obs_mean = np.mean(X, axis=0)
    mu_0 = obs_mean
    sigma_0 = 0.75*np.cov(X.T)

    # Vague values that keeps covariance matrices p.d.
    kappa_0 = 0.01
    nu_0 = 4

    prior_init = np.ones(K)
    prior_tran = np.ones((K,K))
    N, D = X.shape

    rand_starts = list()
    for r in xrange(nrestarts):
        init_means = np.empty((K,D))
        init_cov = list()
        for k in xrange(K):
            init_means[k,:] = mvnrand(mu_0, cov=sigma_0)
            init_cov.append(sample_invwishart(np.linalg.inv(sigma_0), nu_0))
        # We use prior b/c mu and sigma are sampled here
        prior_emit = np.array([Gaussian(mu=init_means[k,:], sigma=sigma_0,
                                       mu_0=mu_0, sigma_0=sigma_0,
                                       kappa_0=kappa_0, nu_0=nu_0)
                              for k in xrange(K)])

        init_init = np.random.rand(K)
        init_init /= np.sum(init_init)

        init_tran = np.random.rand(K,K)
        init_tran /= np.sum(init_tran, axis=1)[:,np.newaxis]

        # Make dict with initial parameters to pass to experiment.
        pd = {'init_init': init_init, 'init_tran': init_tran,
              'prior_init': prior_init, 'prior_tran': prior_tran,
              'prior_emit': prior_emit, 'maxit': maxit, 'verbose': verbose}
        rand_starts.append(pd)

    # Compute Cartesian product of random starts with other possible parameter
    # values, make a generator to fill in entries in the par dicts created
    # above, and then construct the par_list by calling the generator with the
    # Cartesian product iterator.
    par_prod_iter = itertools.product(rand_starts, taus, kappas, reuse_msg,
                                      grow_buffer, Ls, correct_trans)

    def gen_par(par_tuple):
        d = copy.copy(par_tuple[0])
        d['tau'] = par_tuple[1]
        d['kappa'] = par_tuple[2]
        d['reuseMsg'] = par_tuple[3]
        d['growBuffer'] = par_tuple[4]
        d['metaobs_half'] = par_tuple[5]
        d['correctTrans'] = par_tuple[6]
        d['mb_sz'] = 100//(2*par_tuple[5]+1)
        return d

    # Call gen_par on each par product to pack into dictionary to pass to
    # experiment.
    par_list = itertools.imap(gen_par, par_prod_iter)

    # Create ExperimentSequential and call run_exper
    dname = os.path.join(datadir, datafn + "_data.txt")
    exp = ExpSeq(datafn, dname, run_exper, par_list,
                 masks=masks, exper_dir=expdir)
    exp.run()
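The itertools.product / gen_par machinery packs every combination of random start and hyperparameter into one dict per run. A toy illustration of the same pattern with hypothetical values:

import copy
import itertools

rand_starts = [{'seed': 0}, {'seed': 1}]   # e.g. two random restarts
taus, kappas = [1.0, 10.0], [0.5, 0.9]

def gen_par(par_tuple):
    d = copy.copy(par_tuple[0])
    d['tau'], d['kappa'] = par_tuple[1], par_tuple[2]
    return d

par_list = [gen_par(p) for p in itertools.product(rand_starts, taus, kappas)]
print(len(par_list))  # 2 * 2 * 2 = 8 parameter settings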
Example #25
def random_rotation(n,theta):
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    out = np.zeros((n,n))
    out[:2,:2] = rot
    q = np.linalg.qr(np.random.randn(n,n))[0]
    return q.dot(out).dot(q.T)

As = [random_rotation(D_latent, np.pi/24.),
      random_rotation(D_latent, np.pi/8.)]

# Start with a random emission matrix
C = np.random.randn(D_obs, D_latent)
b = -2.0 * np.ones((D_obs, 1))

init_dynamics_distns = [Gaussian(mu=mu_init, sigma=sigma_init) for _ in range(K)]
dynamics_distns = [Regression(A=A, sigma=0.01*np.eye(D_latent)) for A in As]
emission_distns = BernoulliRegression(D_obs, D_latent, A=C, b=b)

truemodel = HMMCountSLDS(
    dynamics_distns=dynamics_distns,
    emission_distns=emission_distns,
    init_dynamics_distns=init_dynamics_distns,
    alpha=3., init_state_distn='uniform')

### Generate data from an SLDS
# Manually create the states object with the mask
T = 1000
stateseq = np.repeat(np.arange(T//100) % 2, 100).astype(np.int32)
statesobj = truemodel._states_class(model=truemodel, T=stateseq.size, stateseq=stateseq)
statesobj.generate_gaussian_states()
Example #26
def make_roslds_mtpl(V, I_inj_values, V_compartment, K=3, D_latent=2,
                     sigmasq_value=1e-4, penalty=.05):
    """
    :param V: the (T,) array of voltage observations
    :param I_inj_values: the (T,) array of injected current inputs
    :param V_compartment: the neighboring compartment's voltage, used as an input
    :param K: the number of discrete states (integer)
    :param D_latent: the dimension of the continuous latent states
    :param sigmasq_value: emission noise variance on the voltage observation
    :param penalty: variance of the zero pseudo-observation
    """
    assert V.ndim == 1, "V must be a shape (T,) array of voltage observations"
    T = V.shape[0]
    D_obs = 2

    directory = './results/'

    # Set the initial transition plane
    if K > 1:
        reg_W, reg_b = make_initial_plane(K)

        # Scale the weights to make the transition boundary sharper
        reg_scale = 100.
        reg_b *= reg_scale
        reg_W *= reg_scale

    # Create the model components
    # (initial state dist, dynamics dist, emissions dist)
    init_dynamics_distns = [
        Gaussian(mu=np.zeros(D_latent),
                 sigma=np.eye(D_latent),
                 nu_0=D_latent + 2, sigma_0=3 * np.eye(D_latent),
                 mu_0=np.zeros(D_latent), kappa_0=1.0)
        for _ in range(K)]

    dynamics_distns = [
        Regression(nu_0=D_latent + 6,
                   S_0=1e-4 * np.eye(D_latent),
                   # D_latent + 3 input columns: bias, I_inj, and V_compartment
                   M_0=np.hstack((np.eye(D_latent), np.zeros((D_latent, 3)))),
                   K_0=np.eye(D_latent + 3),
                   affine=False)
        for _ in range(K)]


    # Constrain the emission matrix to C = [I, 0], with small sigmasq on the
    # voltage observation; `penalty` sets the variance of the zero pseudo-obs
    C = np.hstack((np.eye(D_obs), np.zeros((D_latent, 3))))
    sigmasq = np.array([sigmasq_value, penalty])
    emission_distns = \
        DiagonalRegression(D_obs, D_latent + 3,
                           A=C, sigmasq=sigmasq)

    # Construct the full model

    if K == 1:
        rslds = PGRecurrentOnlySLDS(
            trans_params=dict(sigmasq_A=10000., sigmasq_b=10000.),
            init_state_distn='uniform',
            init_dynamics_distns=init_dynamics_distns,
            dynamics_distns=dynamics_distns,
            emission_distns=emission_distns,
            fixed_emission=True)
    else:
        rslds = PGRecurrentOnlySLDS(
            trans_params=dict(A=np.hstack((np.zeros((K-1, K)), reg_W)),
                              b=reg_b, sigmasq_A=10000., sigmasq_b=10000.),
            init_state_distn='uniform',
            init_dynamics_distns=init_dynamics_distns,
            dynamics_distns=dynamics_distns,
            emission_distns=emission_distns,
            fixed_emission=True)

    # Initialize the continuous states to be V and its smoothed gradient
    assert D_latent == 2
    from scipy.ndimage import gaussian_filter1d
    from sklearn.cluster import KMeans

    # Threshold the voltage before differentiating (thresholds around
    # 2.2-4.0 were also tried; 0 disables the thresholding)
    V_tmp = V.copy()
    v_thre = 0
    V_tmp[V_tmp < v_thre] = 0
    dV = gaussian_filter1d(np.gradient(V_tmp), 10)
    x_init = np.column_stack((V_tmp, dV))
    x_init = (x_init - np.mean(x_init, axis=0)) / np.std(x_init, axis=0)

    # Initialize the discrete states by clustering x_init
    km = KMeans(K).fit(x_init)
    z_init = km.labels_

    # Plot the initialization
    plt.close('all')
    plt.figure()
    plt.subplot(211)
    plt.plot(x_init[:10000, :])
    plt.subplot(212)
    plt.plot(z_init[:10000])
    plt.savefig(directory + '-init-penalized.png')

    # Inputs: a column of ones for the bias (affine term), the injected
    # current, and the neighboring compartment's voltage
    assert I_inj_values.shape == (T,)
    data = np.column_stack((V, np.zeros(T)))  # pseudo observations
    inputs = np.column_stack((np.ones((T, 1)), I_inj_values, V_compartment))
    mask = np.ones((T, D_obs), dtype=bool)
    rslds.add_data(data, mask=mask, inputs=inputs,
                   stateseq=z_init, gaussian_states=x_init)

    return rslds
Example #27
def make_roslds(V, I_inj_values, not_noisy=True, K=6, D_latent=2, sigmasq_value=1e-4, penalty=.05):
    """
        :param V: the (T,) array of voltage observations
        :param K: the number of discrete states (integer)
        :param D_latent: the dimension of the continuous latent states
        """

    assert V.ndim == 1, "V must be a shape (T,) array of voltage observations"
    T = V.shape[0]
    D_obs = 2

    '''
    # initialization
    w1, b1 = np.array([+1.0, 0.0]), np.array([0.0])   # x >0
    w2, b2 = np.array([0.0, +1.0]), np.array([0.0])    # y > 0

    reg_W = np.row_stack((w1, w2))
    reg_b = np.row_stack((b1, b2))
    '''
    reg_W, reg_b = make_initial_plane(K)
    # Scale the weights to make the transition boundary sharper
    reg_scale = 100.
    reg_b *= reg_scale
    reg_W *= reg_scale


    # Create the model components
    # (initial state dist, dynamics dist, emissions dist)
    init_dynamics_distns = [Gaussian(mu=np.zeros(D_latent),
                                     sigma=np.eye(D_latent),
                                     nu_0=D_latent + 2, sigma_0=3 * np.eye(D_latent),
                                     mu_0=np.zeros(D_latent), kappa_0=1.0,
                                     ) for _ in range(K)]

    dynamics_distns = [Regression(nu_0=D_latent + 4,
                                  S_0=1e-4 * np.eye(D_latent),
                                  M_0=np.hstack((np.eye(D_latent), np.zeros((D_latent, 2)))),
                                  K_0=np.eye(D_latent + 2),
                                  affine=False) for _ in range(K)]

    # Constrain the emission matrix to have C = [[1, 0, ..., 0]]
    # and small sigmasq
    # C = np.hstack((np.eye(D_obs), np.zeros((D_obs, 2))))
    C = np.hstack((np.eye(D_obs), np.zeros((D_latent, 2))))
    #sigmasq = np.concatenate((np.array([1e-4]), np.ones(D_obs-1)))
    sigmasq = np.array([sigmasq_value, penalty])
    emission_distns = DiagonalRegression(D_obs, D_latent+2, A=C, sigmasq=sigmasq)

    # Construct the full model

    if K == 1:
        rslds = PGRecurrentOnlySLDS(trans_params=dict(sigmasq_A=10000., sigmasq_b=10000.),
                                    init_state_distn='uniform',
                                    init_dynamics_distns=init_dynamics_distns,
                                    dynamics_distns=dynamics_distns,
                                    emission_distns=emission_distns,
                                    fixed_emission=True)
    else:
        rslds = PGRecurrentOnlySLDS(
            trans_params=dict(A=np.hstack((np.zeros((K-1, K)), reg_W)),
                              b=reg_b, sigmasq_A=10000., sigmasq_b=10000.),
            init_state_distn='uniform',
            init_dynamics_distns=init_dynamics_distns,
            dynamics_distns=dynamics_distns,
            emission_distns=emission_distns,
            fixed_emission=True)

    # Initialize the continuous states to be V and its gradient
    assert D_latent == 2

    if not_noisy:
        dV = gaussian_filter1d(np.gradient(V), 1)
    else:
        nconvolve = 10000
        dV = np.convolve(np.gradient(V), np.ones((nconvolve,)) / nconvolve, mode='same')


    x_init = np.column_stack((V, dV))
    x_init = (x_init - np.mean(x_init, axis=0)) / np.std(x_init, axis=0)

    # Initialize the discrete states by clustering x_init
    km = KMeans(K).fit(x_init)
    z_init = km.labels_


    # Provide an array of ones for the bias (affine term)
    assert I_inj_values.shape == (T,)
    data = np.column_stack((V, np.zeros(T,))) # pseudo obs
    inputs = np.column_stack((np.ones((T, 1)), I_inj_values))
    mask = np.ones((T, D_obs), dtype=bool)
    rslds.add_data(data, mask=mask, inputs=inputs,
                   stateseq=z_init, gaussian_states=x_init)

    return rslds
Example #28
sigma_init = 0.01*np.eye(2)

A = 0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
                   [np.sin(np.pi/24),  np.cos(np.pi/24)]])
sigma_states = 0.01*np.eye(2)

C = np.array([[2.,0.]])
D_out, D_in = C.shape


###################
#  generate data  #
###################

truemodel = NegativeBinomialLDS(
    init_dynamics_distn=Gaussian(mu_init, sigma_init),
    dynamics_distn=AutoRegression(A=A,sigma=sigma_states),
    emission_distn=PGEmissions(D_out, D_in, C=C))

T = 2000
data, z_true = truemodel.generate(T)
psi_true = z_true.dot(C.T)
p_true = logistic(psi_true)



###############
#  fit model  #
###############
model = NegativeBinomialLDS(
    init_dynamics_distn=Gaussian(mu_0=np.zeros(D_in), sigma_0=np.eye(D_in),
Example #29
def simulate_nascar():
    assert K_true == 4

    def random_rotation(n, theta):
        rot = np.array([[np.cos(theta), -np.sin(theta)],
                        [np.sin(theta), np.cos(theta)]])
        out = np.zeros((n,n))
        out[:2,:2] = rot
        q = np.linalg.qr(np.random.randn(n,n))[0]
        # q = np.eye(n)
        return q.dot(out).dot(q.T)

    As = [random_rotation(D_latent, np.pi/24.),
          random_rotation(D_latent, np.pi/48.)]

    # Set the center points for each system
    centers = [np.array([+2.0, 0.]),
               np.array([-2.0, 0.])]
    bs = [-(A - np.eye(D_latent)).dot(center) for A, center in zip(As, centers)]

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([+0.1, 0.]))

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([-0.35, 0.]))

    # Construct multinomial regression to divvy up the space #
    w1, b1 = np.array([+1.0, 0.0]), np.array([-2.0])   # x + b > 0 -> x > -b
    w2, b2 = np.array([-1.0, 0.0]), np.array([-2.0])   # -x + b > 0 -> x < b
    w3, b3 = np.array([0.0, +1.0]), np.array([0.0])    # y > 0

    reg_W = np.row_stack((w1, w2, w3))
    reg_b = np.row_stack((b1, b2, b3))

    # Scale the weights to make the transition boundary sharper
    reg_scale = 100.
    reg_b *= reg_scale
    reg_W *= reg_scale

    # Account for stick breaking asymmetry
    mu_b, _ = compute_psi_cmoments(np.ones(K_true))
    reg_b += mu_b[:,None]

    # Make a recurrent SLDS with these params #
    dynamics_distns = [
        Regression(
            A=np.column_stack((A,b)),
            sigma=1e-4 * np.eye(D_latent),
            nu_0=D_latent + 2,
            S_0=1e-4 * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent + 1)),
            K_0=np.eye(D_latent + 1),
        )
        for A,b in zip(As, bs)]

    init_dynamics_distns = [
        Gaussian(
            mu=np.array([0.0, 1.0]),
            sigma=1e-3 * np.eye(D_latent))
        for _ in range(K_true)]

    C = np.hstack((npr.randn(args.D_obs, D_latent), np.zeros((args.D_obs, 1))))
    emission_distns = \
        DiagonalRegression(args.D_obs, D_latent+1,
                           A=C, sigmasq=1e-5 *np.ones(args.D_obs),
                           alpha_0=2.0, beta_0=2.0)

    model = PGRecurrentSLDS(
        trans_params=dict(A=np.hstack((np.zeros((K_true-1, K_true)), reg_W)), b=reg_b,
                          sigmasq_A=100., sigmasq_b=100.),
        init_state_distn='uniform',
        init_dynamics_distns=init_dynamics_distns,
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns)

    # Sample from the model
    inputs = np.ones((args.T, 1))
    y, x, z = model.generate(T=args.T, inputs=inputs)

    # Mask off some data
    mask = np.ones((args.T, args.D_obs), dtype=bool)
    mask[args.mask_start:args.mask_stop] = False
    return model, inputs, z, x, y, mask