Example #1
import numpy as np
import numpy.random as npr
from ssm import HMM  # the HMM implementation under test (assumed import path)


def test_hmm_likelihood(T=500, K=5, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    hmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    true_lkhd = hmm.log_likelihood(y)

    # Make an HMM with these parameters
    hmm = HMM(K, D, observations="gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.inv_sigmas = np.log(sigma) * np.ones((K, D))
    test_lkhd = hmm.log_probability(y)

    assert np.allclose(true_lkhd, test_lkhd)
Example #2

    def __init__(self, init_emission_distn=None, **kwargs):
        super(_ARMixin, self).__init__(**kwargs)
        if init_emission_distn is None:
            # Default Gaussian over the initial emission, with broad NIW hyperparameters.
            init_emission_distn = Gaussian(
                nu_0=self.P + 1, sigma_0=10 * self.P * np.eye(self.P),
                mu_0=np.zeros(self.P), kappa_0=1.)
        self.init_emission_distn = init_emission_distn
Example #3
    def _make_model(self):
        data = self.model_data
        # The parameters are:
        # Gaussian observation distributions (ellipses in red-green intensity space).
        # mu_0 and sigma_0 parameterize our prior belief about the mean and covariance of each state.
        # nu_0 expresses our confidence in the prior: it is the number of data points we
        # claim produced these prior parameters. nu_0 must be strictly greater than the
        # number of dimensions (2, in our case); even 2.01 would do.
        # The nominal covariance is sigma_0 / nu_0, hence the factor of 3 in sigma_0.
        # kappa_0: uncertainty in the mean should be related to uncertainty in the covariance;
        # kappa_0 is the amplitude of that relationship. A smaller value means other states'
        # means will be further away.
        # (A small numeric sketch of these quantities follows this function.)
        obs_hypparams = dict(
            mu_0=data.mean(0),
            sigma_0=3. * cov(data),
            nu_0=3.,
            kappa_0=0.5,
        )

        # In the function call below:
        # (1) alpha and gamma bias how many states there are; setting both to 1 tells the
        #     model to expect roughly one state (a conservative choice).
        # (2) kappa controls the self-transition bias. A bigger number makes it more expensive
        #     for a state to transition to a different state (i.e., it encourages self-transitions).
        model = WeakLimitStickyHDPHMM(
            alpha=1.,
            gamma=1.,
            init_state_distn='uniform',
            kappa=500.,
            obs_distns=[Gaussian(**obs_hypparams) for _ in range(10)],
        )

        return model
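
The hyperparameter comments in _make_model can be made concrete with a small sketch. This is not part of the original example: the `data` array below is a stand-in for self.model_data, and the arithmetic only illustrates the "nominal covariance is sigma_0 / nu_0" remark and, under the usual normal-inverse-Wishart convention (the mean's prior covariance is the covariance divided by kappa_0), how a smaller kappa_0 spreads the state means further apart.

import numpy as np

# Stand-in for self.model_data: 1000 points in 2-D intensity space.
data = np.random.randn(1000, 2)

sigma_0 = 3. * np.cov(data, rowvar=False)   # prior scale matrix (the factor of 3 from the comment)
nu_0 = 3.                                   # must be strictly greater than the dimension (2)
kappa_0 = 0.5

nominal_cov = sigma_0 / nu_0                # roughly the empirical covariance, as the comment says
mean_spread = nominal_cov / kappa_0         # prior covariance of each state's mean; smaller kappa_0 -> wider spread
print(nominal_cov)
print(mean_spread)

Running this prints two 2x2 matrices; the second is larger by a factor of 1 / kappa_0 = 2, which is the "amplitude" the comment describes.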
Example #4
    def test_hssmm(self):
        import numpy as np
        from matplotlib import pyplot as plt

        from pyhsmm.models import HSMMIntNegBinVariant
        from pyhsmm.basic.models import MixtureDistribution
        from pyhsmm.basic.distributions import Gaussian, NegativeBinomialIntegerRVariantDuration
        from pyhsmm.util.text import progprint_xrange

        #############################
        #  generate synthetic data  #
        #############################

        states_in_hsmm = 5
        components_per_GMM = 3
        component_hyperparameters = dict(mu_0=np.zeros(2), sigma_0=np.eye(2), kappa_0=0.01, nu_0=3)

        GMMs = [MixtureDistribution(
            alpha_0=4.,
            components=[Gaussian(**component_hyperparameters) for i in range(components_per_GMM)])
            for state in range(states_in_hsmm)]

        true_dur_distns = [
            NegativeBinomialIntegerRVariantDuration(np.r_[0., 0, 0, 0, 0, 1, 1, 1], alpha_0=5., beta_0=5.)
            for state in range(states_in_hsmm)]

        truemodel = HSMMIntNegBinVariant(
            init_state_concentration=10.,
            alpha=6., gamma=6.,
            obs_distns=GMMs,
            dur_distns=true_dur_distns)

        training_datas = [truemodel.generate(1000)[0] for i in range(5)]
        test_data = truemodel.generate(5000)[0]

        #####################################
        #  set up FrozenMixture components  #
        #####################################

        # list of all Gaussians
        component_library = [c for m in GMMs for c in m.components]
        library_size = len(component_library)

        # initialize weights to indicator on one component
        init_weights = np.eye(library_size)

        #obs_distns = [FrozenMixtureDistribution(
        #    components=component_library,
        #    alpha_0=4,
        #    weights=row)
        #    for row in init_weights]

        ################
        #  build HSMM  #
        ################

        dur_distns = [NegativeBinomialIntegerRVariantDuration(np.r_[0., 0, 0, 0, 0, 1, 1, 1], alpha_0=5., beta_0=5.)
                      for state in range(library_size)]
Example #5
import numpy as np
import numpy.random as npr
from ssm import HMM  # the HMM implementation under test (assumed import path)


def test_expectations(T=1000, K=20, D=2):
    # Create a true HMM
    A = npr.rand(K, K)
    A /= A.sum(axis=1, keepdims=True)
    A = 0.75 * np.eye(K) + 0.25 * A
    C = npr.randn(K, D)
    sigma = 0.01

    # Sample from the true HMM
    z = np.zeros(T, dtype=int)
    y = np.zeros((T, D))
    for t in range(T):
        if t > 0:
            z[t] = np.random.choice(K, p=A[z[t - 1]])
        y[t] = C[z[t]] + np.sqrt(sigma) * npr.randn(D)

    # Compare to pyhsmm answer
    from pyhsmm.models import HMM as OldHMM
    from pyhsmm.basic.distributions import Gaussian
    hmm = OldHMM(
        [Gaussian(mu=C[k], sigma=sigma * np.eye(D)) for k in range(K)],
        trans_matrix=A,
        init_state_distn="uniform")
    hmm.add_data(y)
    states = hmm.states_list.pop()
    states.E_step()
    true_Ez = states.expected_states
    true_E_trans = states.expected_transcounts

    # Make an HMM with these parameters
    hmm = HMM(K, D, observations="gaussian")
    hmm.transitions.log_Ps = np.log(A)
    hmm.observations.mus = C
    hmm.observations.inv_sigmas = np.log(sigma) * np.ones((K, D))
    test_Ez, test_Ezzp1, _ = hmm.expected_states(y)
    test_E_trans = test_Ezzp1.sum(0)

    print(true_E_trans.round(3))
    print(test_E_trans.round(3))

    assert np.allclose(true_Ez, test_Ez)
    assert np.allclose(true_E_trans, test_E_trans)
Example #6

import numpy as np
from pyhsmm.basic.models import Mixture, MixtureDistribution
from library_models import FrozenMixtureDistribution, LibraryMM
from pyhsmm.basic.distributions import Gaussian, NegativeBinomialIntegerRVariantDuration
from pyhsmm.util.text import progprint_xrange

#############################
#  generate synthetic data  #
#############################

groups_in_metamm = 5
components_per_gmm = 2
component_hyperparameters = dict(mu_0=np.zeros(2),sigma_0=np.eye(2),kappa_0=0.01,nu_0=3)

GMMs = [MixtureDistribution(
    alpha_0=4.,
    components=[Gaussian(**component_hyperparameters) for i in range(components_per_gmm)])
    for state in range(groups_in_metamm)]

truemodel = Mixture(
        alpha_0=6,
        components=GMMs)

data, truelabels = truemodel.generate(2000)

#####################################
#  set up FrozenMixture components  #
#####################################

# list of all Gaussians
component_library = [c for m in GMMs for c in m.components]
library_size = len(component_library)
Example #7

sigmas = f['sigmas']

all_training_data = alldata[:60000]
training_datas = np.array_split(all_training_data,6)
test_data = alldata[-10000:]

#####################################
#  set up FrozenMixture components  #
#####################################

Nmax = 50

# list of all Gaussians
component_library = \
        [Gaussian(mu=mu,sigma=sigma,
            # hyperparameters not used
            mu_0=np.zeros_like(mu),sigma_0=np.eye(sigma.shape[0]),kappa_0=1.,nu_0=mu.shape[0]+5,
            ) for mu, sigma in zip(means,sigmas)]

library_size = len(component_library)

obs_distns = [FrozenMixtureDistribution(
    components=component_library,
    a_0=1.0,b_0=0.05)
    for i in range(Nmax)]

################
#  build HSMM  #
################

dur_distns = [NegativeBinomialIntegerRVariantDuration(np.r_[0.,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1],alpha_0=25.,beta_0=25.)
        for state in range(Nmax)]
Example #8

#############################
#  generate synthetic data  #
#############################

states_in_hsmm = 5
components_per_GMM = 3
component_hyperparameters = dict(mu_0=np.zeros(2),
                                 sigma_0=np.eye(2),
                                 kappa_0=0.025,
                                 nu_0=3)

GMMs = [
    MixtureDistribution(alpha_0=4.,
                        components=[
                            Gaussian(**component_hyperparameters)
                            for i in range(components_per_GMM)
                        ]) for state in range(states_in_hsmm)
]

true_dur_distns = [
    NegativeBinomialIntegerRVariantDuration(np.r_[0., 0, 0, 0, 0, 0, 1, 1, 1,
                                                  1],
                                            alpha_0=5.,
                                            beta_0=5.)
    for state in range(states_in_hsmm)
]

truemodel = HSMMIntNegBinVariant(init_state_concentration=10.,
                                 alpha=6.,
                                 gamma=2.,
                                 obs_distns=GMMs,
                                 dur_distns=true_dur_distns)
Example #9

def hmm_analysis(data,
                 num_states=3,
                 num_pca_components=None,
                 verbose=False,
                 evaluation=True,
                 hmm_type='vanilla',
                 anomaly_type='change_points',
                 stickiness=10,
                 alpha=None,
                 gamma=None,
                 mixture_model=False,
                 pca_components_folder=None,
                 transform='no_transform'):

    # If verbose, print to console.
    def verbose_print(arg):
        if verbose:
            print(arg)

    if anomaly_type != 'change_points':
        raise NotImplementedError

    # Unpack input data.
    counts, energy_range, times = data

    # Transform counts if required.
    counts = Transformation(transform).transform(counts)

    # Apply PCA if we have to.
    apply_pca = num_pca_components is not None and num_pca_components > 0
    if apply_pca:
        verbose_print('Applying PCA to reduce dimensionality to %d.' %
                      num_pca_components)

        if pca_components_folder is None:
            verbose_print('Computing PCA components...')

            pca = PCA(n_components=num_pca_components)
            sequence = pca.fit_transform(counts)

            verbose_print(
                '%0.2f%% of the variance explained by first %d components of PCA.'
                % (np.sum(pca.explained_variance_ratio_) * 100,
                   num_pca_components))
        else:
            verbose_print('Loading components from folder %s...' %
                          pca_components_folder)
            pca_components_file = pca_components_folder + 'pca%d_components.npy' % num_pca_components

            try:
                pca_components = np.load(pca_components_file)
                sequence = np.matmul(counts - counts.mean(axis=0),
                                     pca_components.T)
            except IOError:
                print('PCA components file %s not found.' % pca_components_file)
                raise  # without the components we cannot build the PCA-reduced sequence

    else:
        sequence = counts

    if hmm_type == 'vanilla':
        # Mixture or single Gaussian emissions.
        if mixture_model:
            model = VanillaGaussianMixtureHMM(n_components=num_states, n_mix=2)
        else:
            model = VanillaGaussianHMM(n_components=num_states)

    elif hmm_type in ['hdp', 'stickyhdp']:

        resample_over_Dirichlet = False

        # Maximum number of states.
        max_states = num_states

        # Dimensionality of the time-series.
        data_dimensions = sequence.shape[1]

        # Hyperparameters for the observations.
        if mixture_model:
            # Modelling emission probabilities as a mixture of two Gaussians, instead.
            observation_hyperparameters = [{
                'mu_0': np.zeros(data_dimensions),
                'sigma_0': np.eye(data_dimensions),
                'kappa_0': 0.25,
                'nu_0': data_dimensions + 2
            }, {
                'mu_0': np.zeros(data_dimensions),
                'sigma_0': np.eye(data_dimensions),
                'kappa_0': 0.25,
                'nu_0': data_dimensions + 2
            }]
            observation_dist = [
                MixtureDistribution(
                    alpha_0=1.,
                    components=[
                        Gaussian(**observation_hyperparameters[group])
                        for group in range(2)
                    ]) for state in range(max_states)
            ]
        else:
            # Modelling emission probabilities as a Gaussian with a conjugate Normal/Inverse-Wishart prior.
            observation_hyperparameters = {
                'mu_0': np.zeros(data_dimensions),
                'sigma_0': np.eye(data_dimensions),
                'kappa_0': 0.25,
                'nu_0': data_dimensions + 2
            }
            observation_dist = [
                Gaussian(**observation_hyperparameters)
                for state in range(max_states)
            ]

        if resample_over_Dirichlet:
            params = {
                'alpha_a_0': 1.,
                'alpha_b_0': 1. / 4,
                'gamma_a_0': 1.,
                'gamma_b_0': 1. / 4,
                'init_state_concentration': 1,
                'obs_distns': observation_dist
            }
        else:
            params = {
                'alpha': alpha,
                'gamma': gamma,
                'init_state_concentration': 1,
                'obs_distns': observation_dist
            }

        # Create model.
        if hmm_type == 'hdp':
            model = HDPHMM(**params)
        else:
            model = StickyHDPHMM(kappa=stickiness, **params)

    else:
        raise ValueError('Invalid HMM type.')

    # Learn HMM parameters.
    model.fit(sequence)

    # Estimate log-likelihood of sequence conditional on learned parameters.
    log_likelihood = model.log_likelihood()

    # Get posterior distribution over states, conditional on the input.
    states_dist = model.state_distribution()

    # Assign to state with maximum probability at each timestep.
    states = np.argmax(states_dist, axis=1)
    if len(states) != len(sequence):
        raise AssertionError

    # Time factor indicating how long each timestep is approximately in seconds.
    time_factor = round(np.min(np.diff(times)) * 60 * 60 * 24)

    # Remove state subsequences that are very short.
    while True:
        state_intervals = array_to_intervals(states)

        if len(state_intervals) <= 2:
            break

        found_short_subsequence = False
        for state in state_intervals:
            for state_start, state_end in state_intervals[state]:
                if state_end - state_start < 300 / time_factor:
                    found_short_subsequence = True
                    states_dist[state_start:state_end, state] = 0

        if not found_short_subsequence:
            break

        # Fill in missing distributions.
        for timestep, dist in enumerate(states_dist):
            if np.sum(dist) == 0:
                if timestep == 0:
                    states_dist[timestep] = np.ones(len(dist)) / len(dist)
                else:
                    states_dist[timestep] = states_dist[timestep - 1]

        # Recompute after removing.
        states_dist /= np.sum(states_dist, axis=1, keepdims=True)
        states = np.argmax(states_dist, axis=1)

    # Compute 'distances' between states.
    all_emission_params = {
        state: model.emission_params(state)
        for state in range(num_states)
    }
    dissimilarities = np.zeros((num_states, num_states))
    for (index1, params1), (index2, params2) in itertools.product(
            all_emission_params.items(), all_emission_params.items()):
        dissimilarities[index1][index2] = kl_divergence_normals(params1['mu'], params1['sigma'], params2['mu'], params2['sigma']) \
                                        + kl_divergence_normals(params2['mu'], params2['sigma'], params1['mu'], params1['sigma'])

    # Normalize dissimilarities such that the entire matrix sums up to 'num_states',
    # just like the identity matrix would.
    dissimilarities *= num_states / np.sum(dissimilarities)

    # Score as difference in distributions.
    dist_after = states_dist[1:]
    dist_before = states_dist[:-1]
    dist_diff = np.abs(dist_after - dist_before)
    dist_diff_scaled = np.matmul(dist_diff, dissimilarities)

    scores = np.sum(dist_diff * dist_diff_scaled, axis=1)
    scores = np.append([0], scores)

    if evaluation:
        if anomaly_type == 'change_points':
            return times, scores
        else:
            raise NotImplementedError

    return model, states, log_likelihood, states_dist, scores
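
hmm_analysis relies on two helpers that are not defined in this snippet: kl_divergence_normals and array_to_intervals. Below is a minimal sketch of what they might look like, with signatures inferred from the calls above rather than taken from the original code: the KL helper is assumed to compute the closed-form divergence between two multivariate Gaussians given (mu, sigma) pairs, and array_to_intervals is assumed to map a label sequence to, per state, the half-open (start, end) index ranges where that state is active.

import numpy as np

def kl_divergence_normals(mu0, sigma0, mu1, sigma1):
    # Closed-form KL( N(mu0, sigma0) || N(mu1, sigma1) ) between multivariate Gaussians.
    k = mu0.shape[0]
    sigma1_inv = np.linalg.inv(sigma1)
    diff = mu1 - mu0
    return 0.5 * (np.trace(sigma1_inv.dot(sigma0))
                  + diff.dot(sigma1_inv).dot(diff)
                  - k
                  + np.log(np.linalg.det(sigma1) / np.linalg.det(sigma0)))

def array_to_intervals(states):
    # Map a label sequence to {state: [(start, end), ...]} with half-open index ranges.
    intervals = {}
    start = 0
    for t in range(1, len(states) + 1):
        if t == len(states) or states[t] != states[t - 1]:
            intervals.setdefault(states[start], []).append((start, t))
            start = t
    return intervals

With these in place, the short-subsequence removal loop and the symmetrized-KL dissimilarity matrix above run as written.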