Example #1

import autograd.numpy as np
import autograd.numpy.random as npr
from ssm.models import HMM


def test_hmm_likelihood(T=500, K=5, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    hmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    true_lkhd = hmm.log_likelihood(y)

    # Make an HMM with these parameters
    hmm = HMM(K, D, observations="gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.inv_sigmas = np.log(sigma) * np.ones((K, D))
    test_lkhd = hmm.log_probability(y)

    assert np.allclose(true_lkhd, test_lkhd)
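# Minimal usage sketch (not part of the original test file): run the parity
# check standalone with a fixed seed; pyhsmm must be installed.
if __name__ == "__main__":
    npr.seed(0)
    test_hmm_likelihood()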
Example #2

import autograd.numpy as np
import autograd.numpy.random as npr
from ssm.models import HMM


def test_expectations(T=1000, K=20, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    hmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    hmm.add_data(y)
    states = hmm.states_list.pop()
    states.E_step()
    true_Ez = states.expected_states
    true_E_trans = states.expected_transcounts

    # Make an HMM with these parameters
    hmm = HMM(K, D, observations="gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.inv_sigmas = np.log(sigma) * np.ones((K, D))
    test_Ez, test_Ezzp1, _ = hmm.expected_states(y)
    test_E_trans = test_Ezzp1.sum(0)

    print(true_E_trans.round(3))
    print(test_E_trans.round(3))

    assert np.allclose(true_Ez, test_Ez)
    assert np.allclose(true_E_trans, test_E_trans)
Example #3

import autograd.numpy as np
import autograd.numpy.random as npr
from ssm.models import HMM


def test_viterbi(T=1000, K=20, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    oldhmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    oldhmm.add_data(y)
    states = oldhmm.states_list.pop()
    states.Viterbi()
    z_star = states.stateseq

    # Make an HMM with these parameters
    hmm = HMM(K, D, observations="diagonal_gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.sigmasq = sigma * np.ones((K, D))
    z_star2 = hmm.most_likely_states(y)

    assert np.allclose(z_star, z_star2)
Example #4
import autograd.numpy.random as npr

npr.seed(0)

import matplotlib
import matplotlib.pyplot as plt

from ssm.models import HMM

# Set the parameters of the HMM
T = 500  # number of time bins
K = 5  # number of discrete states
D = 2  # number of observed dimensions

# Make an HMM with the true parameters
true_hmm = HMM(K, D, observations="gaussian")
z, y = true_hmm.sample(T)
z_test, y_test = true_hmm.sample(T)
true_ll = true_hmm.log_probability(y)

# Fit models
N_sgd_iters = 1000
N_em_iters = 100

print("Fitting HMM with SGD")
hmm = HMM(K, D, observations="gaussian")
hmm_sgd_lls = hmm.fit(y, method="sgd", num_iters=N_sgd_iters)
hmm_sgd_test_ll = hmm.log_probability(y_test)
hmm_sgd_smooth = hmm.smooth(y)

print("Fitting HMM with EM")
Example #5
import autograd.numpy.random as npr
npr.seed(0)

import matplotlib
import matplotlib.pyplot as plt

from ssm.models import HMM
from ssm.util import find_permutation

# Set the parameters of the HMM
T = 500  # number of time bins
K = 5  # number of discrete states
D = 2  # number of observed dimensions

# Make an HMM with the true parameters
true_hmm = HMM(K, D, observations="diagonal_gaussian")
z, y = true_hmm.sample(T)
z_test, y_test = true_hmm.sample(T)
true_ll = true_hmm.log_probability(y)

# Fit models
N_sgd_iters = 1000
N_em_iters = 100

# A bunch of observation models that all include the
# diagonal Gaussian as a special case.
observations = [
    "diagonal_gaussian", "gaussian", "diagonal_t", "studentst", "diagonal_ar",
    "ar", "diagonal_robust_ar", "robust_ar"
]
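# Hedged sketch (not part of the original snippet): fit each observation
# model and compare held-out log probability. The method and kwarg names
# follow the other examples on this page and may differ across ssm versions.
for obs in observations:
    hmm = HMM(K, D, observations=obs)
    hmm.fit(y, method="em", num_em_iters=N_em_iters)
    print(obs, hmm.log_probability(y_test))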
Example #6

import autograd.numpy.random as npr
from ssm.models import HMM


def test_sample(T=10, K=4, D=3, M=2):
    """
    Test that we can construct and sample an HMM
    with or without prefixes, inputs, and noise.
    """
    transition_names = [
        "standard", "sticky", "inputdriven", "recurrent", "recurrent_only",
        "rbf_recurrent", "nn_recurrent"
    ]

    observation_names = [
        "gaussian", "diagonal_gaussian", "t", "diagonal_t", "bernoulli",
        "categorical", "poisson", "vonmises", "ar", "diagonal_ar",
        "independent_ar", "robust_ar", "diagonal_robust_ar"
    ]

    # Sample basic (no prefix, inputs, etc.)
    for transitions in transition_names:
        for observations in observation_names:
            hmm = HMM(K,
                      D,
                      M=0,
                      transitions=transitions,
                      observations=observations)
            zsmpl, xsmpl = hmm.sample(T)

    # Sample with prefix
    for transitions in transition_names:
        for observations in observation_names:
            hmm = HMM(K,
                      D,
                      M=0,
                      transitions=transitions,
                      observations=observations)
            zpre, xpre = hmm.sample(3)
            zsmpl, xsmpl = hmm.sample(T, prefix=(zpre, xpre))

    # Sample with inputs
    for transitions in transition_names:
        for observations in observation_names:
            hmm = HMM(K,
                      D,
                      M=M,
                      transitions=transitions,
                      observations=observations)
            zpre, xpre = hmm.sample(3, input=npr.randn(3, M))
            zsmpl, xsmpl = hmm.sample(T,
                                      prefix=(zpre, xpre),
                                      input=npr.randn(T, M))

    # Sample without noise
    for transitions in transition_names:
        for observations in observation_names:
            hmm = HMM(K,
                      D,
                      M=M,
                      transitions=transitions,
                      observations=observations)
            zpre, xpre = hmm.sample(3, input=npr.randn(3, M))
            zsmpl, xsmpl = hmm.sample(T,
                                      prefix=(zpre, xpre),
                                      input=npr.randn(T, M),
                                      with_noise=False)
Example #7

import autograd.numpy as np

from ssm.models import HMM
from ssm.util import find_permutation

# Specify whether or not to save figures
save_figures = True

# In[2]:

# Set the parameters of the HMM
T = 200  # number of time bins
K = 5  # number of discrete states
D = 2  # data dimension

# Make an HMM
true_hmm = HMM(K, D, observations="gaussian")

# Manually tweak the means to make them farther apart
thetas = np.linspace(0, 2 * np.pi, K, endpoint=False)
true_hmm.observations.mus = 3 * np.column_stack(
    (np.cos(thetas), np.sin(thetas)))

# In[3]:

# Sample some data from the HMM
z, y = true_hmm.sample(T)
true_ll = true_hmm.log_probability(y)

# In[4]:

# Plot the observation distributions
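# Hedged sketch of the plotting cell (the original is not shown): scatter
# the samples and overlay the tweaked state means; the plt import and the
# styling choices are assumptions.
import matplotlib.pyplot as plt
plt.plot(y[:, 0], y[:, 1], '.k', alpha=0.25)
plt.plot(true_hmm.observations.mus[:, 0],
         true_hmm.observations.mus[:, 1], 'o')
plt.show()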
Example #8
    K = K                          # number of discrete states
    N = latents_ses1.shape[2]      # number of observed dimensions

    print('T:', T, ', K:', K, ', N:', N)

    # Put each session's trials into a list of (time x dims) arrays
    trialdata_ses1 = [latents_ses1[i, :, :] for i in np.arange(latents_ses1.shape[0])]

    if concatenate_sessions:
        trialdata_ses1 = trialdata_ses1 + \
            [latents_ses2[i, :, :] for i in np.arange(latents_ses2.shape[0])]
    else:
        trialdata_ses2 = [latents_ses2[i, :, :] for i in np.arange(latents_ses2.shape[0])]

    if regularize:
        arhmm_ses1 = HMM(K, N, observations="ar",
                         transitions="sticky",
                         transition_kwargs=dict(kappa=10),
                         observation_kwargs=dict(regularization_params=dict(
                             type='l2', lambda_A=lambda_reg)))
        regularizestring = '_regl2'
    else:
        arhmm_ses1 = HMM(K, N, observations="ar",
                         transitions="sticky",
                         transition_kwargs=dict(kappa=10))
        regularizestring = ''

    arhmm_em_lls_ses1 = arhmm_ses1.fit(trialdata_ses1, method="em",
                                       num_em_iters=numiters)

    # Get the inferred states for session 1
    trialdata_ses1_z = [arhmm_ses1.most_likely_states(trial) for trial in trialdata_ses1]
    trialdata_ses1_z = np.asarray(trialdata_ses1_z)
    As_ses1 = [None] * K
    for k in np.arange(K):
Example #9
    # Put data in list format to feed into the HMM
    trialdata = [pcdata[i, :, :] for i in np.arange(pcdata.shape[0])]

    # Shuffle the trials
    sequence = [i for i in range(len(trialdata))]
    npr.shuffle(sequence)
    sequence = np.array(sequence)

    # Divide into training and testing (the test set was not really used in
    # the end, but held-out log likelihoods can be checked to choose K)
    traintrials = [trialdata[j] for j in sequence[:int(np.ceil(0.8 * len(trialdata)))]]
    testtrials = [trialdata[j] for j in sequence[int(np.ceil(0.8 * len(trialdata))):]]
    print(len(traintrials))
    print(len(testtrials))

    # Run the ARHMM
    arhmm = HMM(K, N, observations="ar",
                transitions="sticky",
                transition_kwargs=dict(kappa=kappa))

    arhmm_em_lls = arhmm.fit(traintrials, method="em", num_em_iters=numiters)

    # Get the inferred states for train and test trials
    traintrials_z = [arhmm.most_likely_states(traintrial) for traintrial in traintrials]
    traintrials_z = np.asarray(traintrials_z)
    testtrials_z = [arhmm.most_likely_states(testtrial) for testtrial in testtrials]
    testtrials_z = np.asarray(testtrials_z)

    # Extract each state's AR dynamics matrix. An attempt was made to permute
    # the states from 'no movement' to 'movement' based on the variance of
    # the values in each A matrix (this didn't really work).
    As = [None] * K
    maxvals = [None] * K
    for k in np.arange(K):
        As[k] = arhmm.params[2][0][k, :, :]
        maxvals[k] = np.var(As[k])
Example #10

import autograd.numpy as np
import autograd.numpy.random as npr
from ssm.models import HMM

npr.seed(0)


# In[2]:


# Set the parameters of the HMM
T = 1000 # number of time bins
K = 2    # number of discrete states
D = 1    # data dimension
M = 1    # input dimension
C = 3    # number of output types/categories

# Make an HMM
true_hmm = HMM(K, D, M, 
               observations="categorical", observation_kwargs=dict(C=C),
               transitions="inputdriven")

# Optionally, turn up the input weights to exaggerate the effect
# true_hmm.transitions.Ws *= 3

# Create an exogenous input
inpt = np.sin(2 * np.pi * np.arange(T) / 50)[:, None] + 1e-1 * npr.randn(T, M)

# Sample some data from the HMM
z, y = true_hmm.sample(T, input=inpt)

# Compute the true log probability of the data, summing out the discrete states
true_lp = true_hmm.log_probability(y, inputs=inpt)
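# Hedged sketch (not in the original snippet): fit a fresh input-driven HMM
# and align its states to the true ones. HMM.permute and find_permutation
# (imported from ssm.util in other examples on this page) are assumed to be
# available in this version of ssm.
from ssm.util import find_permutation

hmm = HMM(K, D, M, observations="categorical", observation_kwargs=dict(C=C),
          transitions="inputdriven")
hmm.fit(y, inputs=inpt, method="em")
hmm.permute(find_permutation(z, hmm.most_likely_states(y, input=inpt)))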

Example #11

import matplotlib.pyplot as plt

from ssm.models import HMM
# Note: this excerpt assumes x (the data), D, and the MixtureOfGaussians
# class were defined in earlier notebook cells that are not shown here.
plt.plot(x[:, 0], x[:, 1])

# In[7]:

mog = MixtureOfGaussians(100, D)
mog.fit(x)

# In[8]:

plt.plot(x[:, 0], x[:, 1])
for mu in mog.observations.mus:
    plt.plot(mu[0], mu[1], 'o')

# In[9]:

arhmm = HMM(K=8, D=D, observations="ar")
arhmm.fit(x)

# In[10]:

z_smpl, x_smpl = arhmm.sample(T=3000)
plt.plot(x_smpl[:, 0], x_smpl[:, 1])

# In[11]:


def sample(T=3000,
           num_samples=25,
           num_burnin=100,
           schedule=None,
           filename="samples.mp4"):