import numpy as np

from pybasicbayes.distributions import Gaussian, Regression, DiagonalRegression
from pylds.util import random_rotation

# Note: the model classes (LDS, LaplaceApproxBernoulliLDS, LaplaceApproxPoissonLDS)
# and the count-data emission distributions (BernoulliRegression, PoissonRegression)
# are assumed to be defined or imported elsewhere in this module (pylds).


def DefaultBernoulliLDS(D_obs, D_latent, D_input=0,
                        mu_init=None, sigma_init=None,
                        A=None, B=None, sigma_states=None,
                        C=None, D=None):
    """Construct a Bernoulli-emission LDS with weak default priors and,
    optionally, user-specified parameters."""
    model = LaplaceApproxBernoulliLDS(
        init_dynamics_distn=Gaussian(
            mu_0=np.zeros(D_latent), sigma_0=np.eye(D_latent),
            kappa_0=1.0, nu_0=D_latent + 1),
        dynamics_distn=Regression(
            nu_0=D_latent + 1,
            S_0=D_latent * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent + D_input)),
            K_0=D_latent * np.eye(D_latent + D_input)),
        emission_distn=BernoulliRegression(
            D_obs, D_latent + D_input, verbose=False))

    # Set a parameter to the user-supplied value, or fall back to a default.
    def set_default(prm, val, default):
        setattr(model, prm, val if val is not None else default)

    set_default("mu_init", mu_init, np.zeros(D_latent))
    set_default("sigma_init", sigma_init, np.eye(D_latent))
    set_default("A", A, 0.99 * random_rotation(D_latent))
    set_default("B", B, 0.1 * np.random.randn(D_latent, D_input))
    set_default("sigma_states", sigma_states, 0.1 * np.eye(D_latent))
    set_default("C", C, np.random.randn(D_obs, D_latent))
    set_default("D", D, 0.1 * np.random.randn(D_obs, D_input))

    return model
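# --- Example usage (illustrative sketch, not part of the factory above) ---
# Sample binary data from one DefaultBernoulliLDS and hand it to a second model
# for inference. generate() and add_data() follow the pylds LDS interface; the
# resample_model() fitting loop is an assumption about how this
# Laplace-approximation model is typically trained.
def _demo_default_bernoulli_lds(T=500, D_obs=20, D_latent=2):
    true_model = DefaultBernoulliLDS(D_obs, D_latent)
    y, x = true_model.generate(T)        # binary observations and latent states

    test_model = DefaultBernoulliLDS(D_obs, D_latent)
    test_model.add_data(y)
    for _ in range(50):
        test_model.resample_model()      # assumed Gibbs-style update of states and parameters
    return test_model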
def DefaultLDS(D_obs, D_latent, D_input=0,
               mu_init=None, sigma_init=None,
               A=None, B=None, sigma_states=None,
               C=None, D=None, sigma_obs=None):
    """Construct a Gaussian-emission LDS with weak default priors and,
    optionally, user-specified parameters."""
    model = LDS(
        dynamics_distn=Regression(
            nu_0=D_latent + 1,
            S_0=D_latent * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent + D_input)),
            K_0=D_latent * np.eye(D_latent + D_input)),
        emission_distn=Regression(
            nu_0=D_obs + 1,
            S_0=D_obs * np.eye(D_obs),
            M_0=np.zeros((D_obs, D_latent + D_input)),
            K_0=D_obs * np.eye(D_latent + D_input)))

    # Set a parameter to the user-supplied value, or fall back to a default.
    def set_default(prm, val, default):
        setattr(model, prm, val if val is not None else default)

    set_default("mu_init", mu_init, np.zeros(D_latent))
    set_default("sigma_init", sigma_init, np.eye(D_latent))
    set_default("A", A, 0.99 * random_rotation(D_latent))
    set_default("B", B, 0.1 * np.random.randn(D_latent, D_input))
    set_default("sigma_states", sigma_states, 0.1 * np.eye(D_latent))
    set_default("C", C, np.random.randn(D_obs, D_latent))
    set_default("D", D, 0.1 * np.random.randn(D_obs, D_input))
    set_default("sigma_obs", sigma_obs, 0.1 * np.eye(D_obs))

    return model
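# --- Example usage (illustrative sketch) ---
# Simulate Gaussian observations from one DefaultLDS and fit another by Gibbs
# sampling while tracking the log likelihood; this mirrors the usual pylds demo
# pattern (add_data / resample_model / log_likelihood).
def _demo_default_lds(T=1000, D_obs=10, D_latent=2):
    true_model = DefaultLDS(D_obs, D_latent)
    y, x = true_model.generate(T)

    test_model = DefaultLDS(D_obs, D_latent)
    test_model.add_data(y)

    lls = []
    for _ in range(100):
        test_model.resample_model()
        lls.append(test_model.log_likelihood())
    return test_model, lls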
def DefaultPoissonLDS(D_obs, D_latent, D_input=0,
                      mu_init=None, sigma_init=None,
                      A=None, B=None, sigma_states=None,
                      C=None, d=None):
    """Construct a Poisson-emission LDS with weak default priors and,
    optionally, user-specified parameters."""
    assert D_input == 0, "Inputs are not yet supported for Poisson LDS"
    model = LaplaceApproxPoissonLDS(
        init_dynamics_distn=Gaussian(
            mu_0=np.zeros(D_latent), sigma_0=np.eye(D_latent),
            kappa_0=1.0, nu_0=D_latent + 1),
        dynamics_distn=Regression(
            A=0.9 * np.eye(D_latent),
            sigma=np.eye(D_latent),
            nu_0=D_latent + 1,
            S_0=D_latent * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent)),
            K_0=D_latent * np.eye(D_latent)),
        emission_distn=PoissonRegression(D_obs, D_latent, verbose=False))

    # Set a parameter to the user-supplied value, or fall back to a default.
    def set_default(prm, val, default):
        setattr(model, prm, val if val is not None else default)

    set_default("mu_init", mu_init, np.zeros(D_latent))
    set_default("sigma_init", sigma_init, np.eye(D_latent))
    set_default("A", A, 0.99 * random_rotation(D_latent))
    set_default("B", B, 0.1 * np.random.randn(D_latent, D_input))
    set_default("sigma_states", sigma_states, 0.1 * np.eye(D_latent))
    set_default("C", C, np.random.randn(D_obs, D_latent))
    set_default("d", d, np.zeros((D_obs, 1)))

    return model
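# --- Example usage (illustrative sketch) ---
# Sample spike-count data from a DefaultPoissonLDS and hand it to a second model
# for inference. generate() and add_data() follow the pylds interface; the exact
# fitting routine depends on the LaplaceApproxPoissonLDS implementation, so the
# update call below is only an assumption.
def _demo_default_poisson_lds(T=500, D_obs=20, D_latent=2):
    true_model = DefaultPoissonLDS(D_obs, D_latent)
    y, x = true_model.generate(T)        # Poisson counts and latent states

    test_model = DefaultPoissonLDS(D_obs, D_latent)
    test_model.add_data(y)
    for _ in range(50):
        test_model.resample_model()      # assumed Gibbs-style update
    return test_model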
def make_rslds_parameters(C_init):
    """Build initial-state, dynamics, and emission distributions for a
    recurrent SLDS (K, D_latent, and D_obs are module-level constants)."""
    init_dynamics_distns = [
        Gaussian(
            mu=np.zeros(D_latent),
            sigma=3 * np.eye(D_latent),
            nu_0=D_latent + 2,
            sigma_0=3. * np.eye(D_latent),
            mu_0=np.zeros(D_latent),
            kappa_0=1.0,
        )
        for _ in range(K)
    ]

    # Random rotational dynamics per discrete state, with an extra column
    # appended for the affine (bias) term.
    ths = np.random.uniform(np.pi / 30., 1.0, size=K)
    As = [random_rotation(D_latent, th) for th in ths]
    As = [np.hstack((A, np.ones((D_latent, 1)))) for A in As]
    dynamics_distns = [
        Regression(
            A=As[k],
            sigma=np.eye(D_latent),
            nu_0=D_latent + 1000,
            S_0=np.eye(D_latent),
            M_0=np.hstack((np.eye(D_latent), np.zeros((D_latent, 1)))),
            K_0=np.eye(D_latent + 1),
        )
        for k in range(K)
    ]

    if C_init is not None:
        emission_distns = \
            DiagonalRegression(D_obs, D_latent + 1,
                               A=C_init.copy(), sigmasq=np.ones(D_obs),
                               alpha_0=2.0, beta_0=2.0)
    else:
        emission_distns = \
            DiagonalRegression(D_obs, D_latent + 1,
                               alpha_0=2.0, beta_0=2.0)

    return init_dynamics_distns, dynamics_distns, emission_distns
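# --- Example usage (illustrative sketch) ---
# The factory returns one Gaussian initial-state distribution and one Regression
# dynamics distribution per discrete state, plus a single shared DiagonalRegression
# emission distribution. K, D_latent, and D_obs are assumed to be set at module
# level before calling it.
def _demo_make_rslds_parameters():
    init_dds, dyn_dds, emi_dd = make_rslds_parameters(C_init=None)
    assert len(init_dds) == K and len(dyn_dds) == K
    # Each dynamics matrix carries an extra column for the affine term.
    assert dyn_dds[0].A.shape == (D_latent, D_latent + 1)
    return init_dds, dyn_dds, emi_dd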
import numpy as np
import numpy.random as npr

from pylds.util import random_rotation
from pyslds.models import DefaultSLDS

npr.seed(0)

# Set parameters
K = 5
D_obs = 100
D_latent = 2
D_input = 1
T = 1000

# Make an SLDS with known parameters
true_mu_inits = [np.ones(D_latent) for _ in range(K)]
true_sigma_inits = [np.eye(D_latent) for _ in range(K)]
true_As = [.9 * random_rotation(D_latent) for k in range(K)]
true_Bs = [3 * npr.randn(D_latent, D_input) for k in range(K)]
true_sigma_states = [np.eye(D_latent) for _ in range(K)]
true_C = np.random.randn(D_obs, D_latent)
true_Ds = np.zeros((D_obs, D_input))
true_sigma_obs = np.eye(D_obs)

true_model = DefaultSLDS(
    K, D_obs, D_latent, D_input=D_input,
    mu_inits=true_mu_inits, sigma_inits=true_sigma_inits,
    As=true_As, Bs=true_Bs, sigma_statess=true_sigma_states,
    Cs=true_C, Ds=true_Ds, sigma_obss=true_sigma_obs)

# Simulate some data with a given discrete state sequence
inputs = np.ones((T, D_input))
z = np.arange(K).repeat(T // K)
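# --- Continuation sketch (not in the original snippet) ---
# Generate data from the true model and fit a fresh DefaultSLDS by Gibbs
# sampling. The generate(...) call matches the usage in the next example;
# add_data / resample_model assume the standard pyslds Gibbs interface.
y, x, _ = true_model.generate(T, inputs=inputs, stateseq=z)

test_model = DefaultSLDS(K, D_obs, D_latent, D_input=D_input)
test_model.add_data(y, inputs=inputs)
for _ in range(100):
    test_model.resample_model()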
import numpy as np
import numpy.random as npr

from pylds.util import random_rotation
from pyslds.models import DefaultSLDS

npr.seed(0)

# Set parameters
K = 5
D_obs = 10
D_latent = 2
D_input = 1
T = 1000

# Make an SLDS with known parameters
true_mu_inits = [np.ones(D_latent) for _ in range(K)]
true_sigma_inits = [np.eye(D_latent) for _ in range(K)]
true_As = [.99 * random_rotation(D_latent, theta=np.pi / ((k + 1) * 4))
           for k in range(K)]
true_Bs = [3 * npr.randn(D_latent, D_input) for k in range(K)]
true_sigma_states = [np.eye(D_latent) for _ in range(K)]
true_C = np.random.randn(D_obs, D_latent)
true_Ds = np.zeros((D_obs, D_input))
true_sigma_obs = 0.05 * np.eye(D_obs)

true_model = DefaultSLDS(
    K, D_obs, D_latent, D_input=D_input,
    mu_inits=true_mu_inits, sigma_inits=true_sigma_inits,
    As=true_As, Bs=true_Bs, sigma_statess=true_sigma_states,
    Cs=true_C, Ds=true_Ds, sigma_obss=true_sigma_obs)

# Simulate some data with a given discrete state sequence
inputs = np.ones((T, D_input))
z = np.arange(K).repeat(T // K)
y, x, _ = true_model.generate(T, inputs=inputs, stateseq=z)
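# --- Fitting sketch (not in the original snippet) ---
# Fit a fresh DefaultSLDS to the simulated data by Gibbs sampling and track the
# log likelihood. The add_data / resample_model / log_likelihood calls assume
# the standard pyslds model interface.
fit_model = DefaultSLDS(K, D_obs, D_latent, D_input=D_input)
fit_model.add_data(y, inputs=inputs)

lls = []
for itr in range(200):
    fit_model.resample_model()
    lls.append(fit_model.log_likelihood())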
def simulate_nascar():
    """Simulate data from a "NASCAR" recurrent SLDS: two rotational states that
    turn the trajectory around at either end of the track, plus two
    translational states that move it right and left along the straights."""
    assert K_true == 4

    # Two rotational states (the turns)
    As = [random_rotation(D_latent, np.pi / 24.),
          random_rotation(D_latent, np.pi / 48.)]

    # Set the center points for each system
    centers = [np.array([+2.0, 0.]), np.array([-2.0, 0.])]
    bs = [-(A - np.eye(D_latent)).dot(center)
          for A, center in zip(As, centers)]

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([+0.1, 0.]))

    # Add a "left" state
    As.append(np.eye(D_latent))
    bs.append(np.array([-0.25, 0.]))

    # Construct multinomial regression to divvy up the space
    w1, b1 = np.array([+1.0, 0.0]), np.array([-2.0])   # x + b > 0 -> x > -b
    w2, b2 = np.array([-1.0, 0.0]), np.array([-2.0])   # -x + b > 0 -> x < b
    w3, b3 = np.array([0.0, +1.0]), np.array([0.0])    # y > 0
    w4, b4 = np.array([0.0, -1.0]), np.array([0.0])    # y < 0
    reg_W = np.column_stack((100 * w1, 100 * w2, 10 * w3, 10 * w4))
    reg_b = np.concatenate((100 * b1, 100 * b2, 10 * b3, 10 * b4))

    # Make a recurrent SLDS with these params
    dynamics_distns = [
        Regression(
            A=np.column_stack((A, b)),
            sigma=1e-4 * np.eye(D_latent),
            nu_0=D_latent + 2,
            S_0=1e-4 * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent + 1)),
            K_0=np.eye(D_latent + 1),
        )
        for A, b in zip(As, bs)
    ]

    init_dynamics_distns = [
        Gaussian(mu=np.array([0.0, 1.0]), sigma=1e-3 * np.eye(D_latent))
        for _ in range(K)
    ]

    C = np.hstack((npr.randn(D_obs, D_latent), np.zeros((D_obs, 1))))
    emission_distns = \
        DiagonalRegression(D_obs, D_latent + 1,
                           A=C, sigmasq=1e-5 * np.ones(D_obs),
                           alpha_0=2.0, beta_0=2.0)

    model = SoftmaxRecurrentOnlySLDS(
        trans_params=dict(W=reg_W, b=reg_b),
        init_state_distn='uniform',
        init_dynamics_distns=init_dynamics_distns,
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns,
        alpha=3.)

    #########################
    # Sample from the model #
    #########################
    inputs = np.ones((T, 1))
    y, x, z = model.generate(T=T, inputs=inputs)

    # Mask off some data
    if mask_start == mask_stop:
        mask = None
    else:
        mask = np.ones((T, D_obs), dtype=bool)
        mask[mask_start:mask_stop] = False

    # Print the true parameters
    np.set_printoptions(precision=2)
    print("True W:\n{}".format(model.trans_distn.W))
    print("True logpi:\n{}".format(model.trans_distn.logpi))

    return model, inputs, z, x, y, mask
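# --- Example usage (illustrative sketch, not in the original) ---
# Simulate the NASCAR data, build an rSLDS for inference from the parameter
# factory above, and fit it by Gibbs sampling. The module-level K_true, K,
# D_latent, D_obs, T, mask_start, and mask_stop must be set first. The
# zero-initialized transition parameters and the mask= keyword to add_data are
# assumptions; resample_model() assumes the usual pybasicbayes-style Gibbs
# interface.
def _demo_fit_nascar(n_iters=500):
    true_model, inputs, z_true, x_true, y, mask = simulate_nascar()

    init_dynamics_distns, dynamics_distns, emission_distns = \
        make_rslds_parameters(C_init=None)

    test_model = SoftmaxRecurrentOnlySLDS(
        trans_params=dict(W=np.zeros((D_latent, K)), b=np.zeros(K)),
        init_state_distn='uniform',
        init_dynamics_distns=init_dynamics_distns,
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns,
        alpha=3.)

    test_model.add_data(y, inputs=inputs, mask=mask)
    for _ in range(n_iters):
        test_model.resample_model()
    return test_model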