Example #1
import numpy as np
from sklearn.mixture.gmm import sample_gaussian  # same legacy helper imported in Example #2


def greedy_sample(chains,
                  means,
                  covars,
                  n_samples=1,
                  random_state=None,
                  max_cycle_duration=0):
    assert means.shape[0] == covars.shape[0]

    # Greedy sample algorithm as described by Takano et al.
    n_chains = len(chains)
    n_features = means.shape[-1]
    states = np.zeros((n_samples, n_chains), dtype=int)
    for chain_idx, chain in enumerate(chains):
        states[0, chain_idx] = np.argmax(chain._log_startprob)
        t = 1
        while t < n_samples:
            prev_state = states[t - 1, chain_idx]

            # Stay in state until duration is over or until we reach the n_samples limit
            trans_prob_cycle = np.exp(chain._log_transmat[prev_state,
                                                          prev_state])
            if trans_prob_cycle == 1.0:
                trans_prob_cycle -= np.finfo(float).eps
            assert 0.0 <= trans_prob_cycle < 1.0
            duration = int(
                np.floor(
                    min(1.0 / (1.0 - trans_prob_cycle),
                        n_samples - t,
                        max_cycle_duration)))
            for d in range(duration):
                states[t + d, chain_idx] = prev_state
            t += duration
            if t >= n_samples:
                continue

            # Get argmax of transition probability of previous state but ignore transition from prev_state -> prev_state
            state = None
            for idx, val in enumerate(chain._log_transmat[prev_state]):
                if idx != prev_state and (
                        state is None
                        or val > chain._log_transmat[prev_state, state]):
                    state = idx
            assert state is not None
            assert state != prev_state
            states[t, chain_idx] = state
            t += 1

    obs = np.zeros((n_samples, n_features))
    for t in range(n_samples):
        state_combination = tuple(states[t])
        mean = means[state_combination]
        covar = covars[state_combination]
        obs[t] = sample_gaussian(mean,
                                 covar,
                                 'diag',
                                 random_state=random_state)
    return obs
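
Below is a minimal usage sketch for greedy_sample. It assumes the chain objects expose the legacy sklearn/hmmlearn-style private attributes _log_startprob and _log_transmat that the function reads; the FakeChain class, state counts, and random parameters are illustrative stand-ins, not part of the original snippet.

import numpy as np


class FakeChain(object):
    """Hypothetical stand-in for a fitted HMM chain."""

    def __init__(self, startprob, transmat):
        # greedy_sample reads log-probabilities, so store them in log space
        self._log_startprob = np.log(startprob)
        self._log_transmat = np.log(transmat)


rng = np.random.RandomState(0)
n_states, n_features = 3, 2

# Two independent chains with uniform start probabilities and random
# row-stochastic transition matrices
chains = [FakeChain(np.full(n_states, 1.0 / n_states),
                    rng.dirichlet(np.ones(n_states), size=n_states))
          for _ in range(2)]

# One diagonal Gaussian per combination of states across the two chains
means = rng.randn(n_states, n_states, n_features)
covars = np.ones((n_states, n_states, n_features))

obs = greedy_sample(chains, means, covars, n_samples=10,
                    random_state=rng, max_cycle_duration=5)
print(obs.shape)  # (10, 2)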
Example #2
    def samples_and_comps(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        random_state : int or RandomState, optional
            Seed or random number generator used for sampling. Defaults
            to the model's random_state attribute.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples.

        comps : array, shape (n_samples,)
            Index of the component each sample was drawn from.
        """
        import numpy as np
        from sklearn.mixture.gmm import sample_gaussian
        from sklearn.utils import check_random_state

        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        weight_cdf = np.cumsum(self.weights_)

        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T

        return X, comps
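
Below is a minimal usage sketch for samples_and_comps. It assumes the method belongs to a legacy sklearn.mixture.GMM-style estimator (the same scikit-learn generation that provides the sample_gaussian import above) and that the samples_and_comps function shown above is available at module scope; the SamplingGMM subclass, training data, and seed are illustrative assumptions.

import numpy as np
from sklearn.mixture import GMM  # legacy estimator, removed in recent scikit-learn


class SamplingGMM(GMM):
    """Hypothetical subclass that carries the samples_and_comps method above."""
    # Assumes the samples_and_comps function shown above is defined at module scope
    samples_and_comps = samples_and_comps


rng = np.random.RandomState(0)
X_train = np.vstack([rng.randn(200, 2) - 3.0,
                     rng.randn(200, 2) + 3.0])

model = SamplingGMM(n_components=2, covariance_type='diag', random_state=0)
model.fit(X_train)

# Draw five samples together with the component each was generated from
X, comps = model.samples_and_comps(n_samples=5)
print(X.shape)   # (5, 2)
print(comps)     # component index for each sample, e.g. array([0, 1, 1, 0, 1])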