Example #1
import warnings

import numpy as np
from pylds.models import DefaultLDS
from statsmodels.tools import sm_exceptions


def fit_lds_gibbs(seq, inputs, guessed_dim, num_update_samples):
    """Fits LDS model via Gibbs sampling and EM. Returns fitted eigenvalues."""
    if inputs is None:
        model = DefaultLDS(D_obs=1, D_latent=guessed_dim, D_input=0)
    else:
        model = DefaultLDS(D_obs=1, D_latent=guessed_dim, D_input=1)
    model.add_data(seq, inputs=inputs)
    ll = np.zeros(num_update_samples)
    # Run the Gibbs sampler
    for i in range(num_update_samples):
        try:
            model.resample_model()
        except AssertionError as e:
            # If a Gibbs sweep fails, warn and return the eigenvalues of the
            # current dynamics matrix, sorted by magnitude (descending).
            warnings.warn(str(e), sm_exceptions.ConvergenceWarning)
            eigs = np.linalg.eigvals(model.A)
            return eigs[np.argsort(np.abs(eigs))[::-1]]
        ll[i] = model.log_likelihood()
    # Rough convergence check: treat the run as converged if the maximum log
    # likelihood over the last 10% of sweeps exceeds the earlier maximum by
    # no more than the tolerance.
    recent_steps = int(num_update_samples / 10)
    tol = 1.0
    if np.max(ll[-recent_steps:]) - np.max(ll[:-recent_steps]) > tol:
        warnings.warn(
            'Questionable convergence. Log likelihood values: ' + str(ll),
            sm_exceptions.ConvergenceWarning)
    # Return eigenvalues of the fitted dynamics matrix, sorted by real part
    # (descending); note the error path above sorts by magnitude instead.
    eigs = np.linalg.eigvals(model.A)
    return eigs[np.argsort(eigs.real)[::-1]]
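
A minimal usage sketch (not from the source project; the toy sequence and argument values are illustrative assumptions):

import numpy as np

# Toy scalar sequence: a damped oscillation plus noise, shaped (T, 1).
t = np.arange(500)
seq = (np.cos(0.1 * t) * 0.99 ** t + 0.01 * np.random.randn(500)).reshape(-1, 1)

# Fit with a guessed latent dimension of 2 and 100 Gibbs sweeps.
eigs = fit_lds_gibbs(seq, inputs=None, guessed_dim=2, num_update_samples=100)
print('Leading fitted eigenvalues:', eigs[:2])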
Example #2
File: lds.py Project: HIPS/pgmult
    def initialize_from_gaussian_lds(self, N_samples=100):
        """
        Initialize z, A, C, sigma_states using a Gaussian LDS
        :return:
        """
        from pylds.models import DefaultLDS
        # Old pylds constructor: n = latent dimension, p = observed dimension.
        init_model = DefaultLDS(n=self.n, p=self.K)

        for data in self.data_list:
            init_model.add_data(data["x"])

        print("Initializing with Gaussian LDS")
        # Note: N_samples is unused here; the original hardcodes 20 sweeps.
        for smpl in range(20):
            init_model.resample_model()

        # Use the init model's parameters
        self.A = init_model.A.copy()
        self.C = init_model.C[:self.K - 1, :].copy()
        self.sigma_states = init_model.sigma_states.copy()
        self.mu_init = init_model.mu_init.copy()
        self.sigma_init = init_model.sigma_init.copy()

        # Use the init model's latent state sequences too
        for data, init_data in zip(self.data_list, init_model.states_list):
            data["z"] = init_data.stateseq.copy()

        # Now resample omega
        self.emission_distn.resample_omega(self.data_list)
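
The warm-start pattern above also works outside a class; a sketch under assumed dimensions (the data and sizes here are hypothetical, using the same old n/p constructor):

import numpy as np
from pylds.models import DefaultLDS

# Fit a throwaway Gaussian LDS to the observations by Gibbs sampling.
init_model = DefaultLDS(n=4, p=10)       # n = latent dim, p = observed dim
init_model.add_data(np.random.randn(500, 10))
for _ in range(20):
    init_model.resample_model()

# Copy its parameters out as initial values for a more complex model.
A_init = init_model.A.copy()
C_init = init_model.C.copy()
sigma_states_init = init_model.sigma_states.copy()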
Example #3
import numpy as np
from pybasicbayes.util.text import progprint_xrange
from pylds.models import DefaultLDS


def fit_gaussian_lds_model(Xs, N_samples=100):
    # D (latent dim) and K (observed dim) are module-level globals in the
    # source project; old pylds constructor: n = latent, p = observed.
    testmodel = DefaultLDS(n=D, p=K)

    for X in Xs:
        testmodel.add_data(X)

    samples = []   # per-sweep model snapshots (collected but not returned)
    lls = []
    for smpl in progprint_xrange(N_samples):
        testmodel.resample_model()

        samples.append(testmodel.copy_sample())
        lls.append(testmodel.log_likelihood())

    lls = np.array(lls)
    return lls
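
Since D and K are globals in the source project, they must be defined before calling; a hedged usage sketch with illustrative values:

import numpy as np

D, K = 2, 5                                        # assumed latent / observed dims
Xs = [np.random.randn(300, K) for _ in range(3)]   # three toy sequences
lls = fit_gaussian_lds_model(Xs, N_samples=50)
print('final log likelihood:', lls[-1])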
Example #4
import numpy as np
import matplotlib.pyplot as plt

from pybasicbayes.util.text import progprint_xrange
from pylds.models import DefaultLDS

D_obs = 1      # observation dimension; missing from the original snippet (assumed value)
D_latent = 2
D_input = 0
T = 2000

# Simulate from one LDS
truemodel = DefaultLDS(D_obs, D_latent, D_input)
# With D_input = 0 this is an empty (T, 0) array, i.e. no exogenous inputs.
inputs = np.random.randn(T, D_input)
data, stateseq = truemodel.generate(T, inputs=inputs)

# Fit with another LDS
model = DefaultLDS(D_obs, D_latent, D_input)
model.add_data(data, inputs=inputs)

# Initialize with a few iterations of Gibbs
for _ in progprint_xrange(10):
    model.resample_model()


# Run EM
def update(model):
    model.EM_step()
    return model.log_likelihood()


lls = [update(model) for _ in progprint_xrange(50)]

# Plot the log likelihoods
plt.figure()
plt.plot(lls)
plt.xlabel('iteration')
plt.ylabel('training likelihood')
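
One way to sanity-check the EM fit (a sketch, not part of the original example): compare the eigenvalues of the fitted and true dynamics matrices.

# Both models expose their dynamics matrix as .A.
true_eigs = np.sort(np.linalg.eigvals(truemodel.A))
fit_eigs = np.sort(np.linalg.eigvals(model.A))
print('true eigenvalues:  ', true_eigs)
print('fitted eigenvalues:', fit_eigs)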
Example #5
import numpy as np
import matplotlib.pyplot as plt

from pybasicbayes.distributions import Regression
from pybasicbayes.util.text import progprint_xrange
from pylds.models import DefaultLDS, LDS

# The top of this snippet was cut off; the parameter values below are
# illustrative stand-ins so the true-model construction runs.
A = 0.99 * np.eye(2)              # assumed latent dynamics matrix
sigma_states = 0.01 * np.eye(2)   # assumed state noise covariance
C = np.random.randn(1, 2)         # assumed emission matrix
sigma_obs = 0.01 * np.eye(1)      # assumed observation noise covariance

truemodel = LDS(
    dynamics_distn=Regression(A=A, sigma=sigma_states),
    emission_distn=Regression(A=C, sigma=sigma_obs))

data, stateseq = truemodel.generate(2000)


###############
#  fit model  #
###############

def update(model):
    return model.meanfield_coordinate_descent_step()

# Old pylds API: add_data returns the model, so construction can be chained.
model = DefaultLDS(n=2, p=data.shape[1]).add_data(data)

# Burn in with Gibbs sampling before switching to mean field updates.
for _ in progprint_xrange(100):
    model.resample_model()

vlbs = [update(model) for _ in progprint_xrange(50)]

plt.figure(figsize=(3,4))
plt.plot(vlbs)
plt.xlabel('iteration')
plt.ylabel('variational lower bound')


################
#  predicting  #
################

Npredict = 100
prediction_seed = data[:1700]