def __init__(self,
                 K,
                 D,
                 M=0,
                 transformation='linear',
                 mus_init=None,
                 sigmas=None,
                 lags=1,
                 train_sigma=True):
        super(ARGaussianObservation, self).__init__(K, D, M)

        if mus_init is None:
            self.mus_init = torch.zeros(self.K, self.D, dtype=torch.float64)
        else:
            self.mus_init = check_and_convert_to_tensor(mus_init)

        # consider diagonal covariance
        self.log_sigmas_init = torch.tensor(np.log(np.ones((K, D))),
                                            dtype=torch.float64)

        if sigmas is None:
            self.log_sigmas = nn.Parameter(torch.tensor(np.log(5 * np.ones(
                (K, D))),
                                                        dtype=torch.float64),
                                           requires_grad=train_sigma)
        else:
            assert sigmas.shape == (self.K, self.D)
            assert np.all(np.asarray(sigmas) > 0), "sigmas must be positive"
            self.log_sigmas = nn.Parameter(torch.tensor(np.log(sigmas),
                                                        dtype=torch.float64),
                                           requires_grad=train_sigma)

        self.lags = lags

        if isinstance(transformation, str):
            if transformation == 'linear':
                self.transformation = LinearTransformation(K=self.K,
                                                           D=self.D,
                                                           lags=self.lags)
            else:
                raise ValueError("Unsupported transformation: {}".format(transformation))
        else:
            assert isinstance(transformation, BaseTransformation)
            self.transformation = transformation
            self.lags = self.transformation.lags
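
The sigmas above are stored as log_sigmas, so gradient steps can move the parameter freely while the standard deviations recovered with exp stay positive. A minimal standalone sketch of that round trip (the numbers are made up):

import numpy as np
import torch

# store log(sigma) as the trainable parameter ...
sigmas = np.array([[0.5, 2.0], [1.0, 3.0], [4.0, 0.1]])  # (K=3, D=2), made-up values
log_sigmas = torch.nn.Parameter(torch.tensor(np.log(sigmas), dtype=torch.float64))

# ... so even a large optimizer step in log-space keeps sigma = exp(log_sigma) positive
log_sigmas.data -= 10.0
assert torch.all(torch.exp(log_sigmas) > 0)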
Example 2
import torch
import numpy as np

from ssm_ptc.observations.ar_logit_normal_observation import ARLogitNormalObservation
from ssm_ptc.observations.ar_truncated_normal_observation import ARTruncatedNormalObservation
from ssm_ptc.transformations.linear import LinearTransformation
from ssm_ptc.transformations.constrained_linear import ConstrainedLinearTransformation
from ssm_ptc.utils import k_step_prediction

torch.manual_seed(0)
np.random.seed(0)

K = 3
D = 2
lags = 10

# AR Gaussian

trans1 = LinearTransformation(K=K, D=D, lags=lags)
obs1 = ARGaussianObservation(K=K,
                             D=D,
                             transformation=trans1,
                             train_sigma=False)
model1 = HMM(K=K, D=D, observation=obs1)

model2 = HMM(K=K, D=D, observation_kwargs={"lags": lags})

#print(model1.params == model2.params)

model2.params = model1.params

for p1, p2 in zip(model1.params_unpack, model2.params_unpack):
    assert torch.all(torch.eq(p1, p2))
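
If the copied parameters really are identical, both models should assign the same log-likelihood to any data. A quick hedged check with made-up data, using the log_likelihood method shown in the other examples:

data_check = torch.randn(50, D, dtype=torch.float64)  # made-up data of length 50
assert torch.allclose(model1.log_likelihood(data_check),
                      model2.log_likelihood(data_check))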
Example 3
data = torch.randn(T, D, dtype=torch.float64)

lags = 1

bounds = np.array([[-2, 2], [0, 1], [-2, 2], [0, 1]])

As = np.array([
    np.column_stack([np.identity(D),
                     np.zeros((D, (lags - 1) * D))]) for _ in range(K)
])

torch.manual_seed(0)
np.random.seed(0)

tran = LinearTransformation(K=K, D=D, lags=lags, As=As)
observation = ARTruncatedNormalObservation(K=K,
                                           D=D,
                                           M=0,
                                           transformation=tran,
                                           bounds=bounds)

model = HMM(K=K, D=D, M=0, observation=observation)

lls = model.log_likelihood(data)
print(lls)

#losses_1, optimizer_1 = model_1.fit(data_1, method='adam', num_iters=2000, lr=0.001)

z_1 = model.most_likely_states(data)
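
The commented-out line above shows the training entry point. A minimal sketch of fitting this model before decoding, assuming the fit() signature from that comment (method, num_iters, lr) and its (losses, optimizer) return value:

# fit with a short run, then decode again and see whether the state sequence changed
losses, optimizer = model.fit(data, method='adam', num_iters=500, lr=0.001)
z_trained = model.most_likely_states(data)
print("states changed after training:", not np.array_equal(z_1, z_trained))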
class ARTruncatedNormalObservation(BaseObservation):
    """
    An autoregressive observation model with truncated-normal emissions, one set of parameters per hidden state
    """
    def __init__(self,
                 K,
                 D,
                 M=0,
                 transformation='linear',
                 lags=1,
                 mus_init=None,
                 sigmas=None,
                 bounds=None,
                 train_sigma=True):
        """
        x ~ N(mu, sigma)
        h_1 = sigmoid(h)
        x_tran = (upperbound - lowerbound) * h_1 + lowerbound

        :param K: number of hidden states
        :param D: dimension of the observation
        :param M: dimension of the input
        :param transformation: autoregressive transformation; maps the previous observations to the
         parameters of the emission distribution
        :param lags: number of autoregressive lags
        :param mus_init: (K, D) -- initial means used for the first `lags` time steps
        :param sigmas: (K, D) -- per-state standard deviations; the diagonal covariance of shape (K, D, D) is recovered from them
        :param bounds: (D, 2) -- lower and upper bounds for each dimension of the observation
        :param train_sigma: whether log_sigmas is trainable
        """
        super(ARTruncatedNormalObservation, self).__init__(K, D, M)

        if isinstance(transformation, str):
            if transformation == 'linear':
                self.transformation = LinearTransformation(K=self.K,
                                                           D=self.D,
                                                           lags=lags)
                self.lags = lags
            else:
                raise ValueError("Unsupported transformation: {}".format(transformation))
        else:
            assert isinstance(transformation, BaseTransformation)
            self.transformation = transformation
            self.lags = self.transformation.lags

        # consider diagonal covariance
        if sigmas is None:
            log_sigmas = torch.tensor(np.log(np.ones((K, D))),
                                      dtype=torch.float64)
        else:
            assert sigmas.shape == (self.K, self.D)
            assert np.all(np.asarray(sigmas) > 0), "sigmas must be positive"
            log_sigmas = check_and_convert_to_tensor(np.log(sigmas),
                                                     dtype=torch.float64)
        self.log_sigmas = nn.Parameter(log_sigmas, requires_grad=train_sigma)

        if bounds is None:
            raise ValueError("Please provide bounds.")
            # default bound for each dimension is [0,1]
            #self.bounds = torch.cat((torch.zeros(self.D, dtype=torch.float64)[:, None],
            #                         torch.ones(self.D, dtype=torch.float64)[:, None]), dim=1)
        else:
            self.bounds = check_and_convert_to_tensor(bounds,
                                                      dtype=torch.float64)
            assert self.bounds.shape == (self.D, 2)

        if mus_init is None:
            self.mus_init = torch.eye(self.K, self.D, dtype=torch.float64)
        else:
            self.mus_init = torch.tensor(mus_init, dtype=torch.float64)

        # consider diagonal covariance
        self.log_sigmas_init = torch.tensor(np.log(np.ones((K, D))),
                                            dtype=torch.float64)

    def _compute_mus_based_on(self, data):
        # TODO: test this method
        mus = self.transformation.transform(data)
        return mus

    def _compute_mus_for(self, data, **kwargs):
        """
        compute the mean vector for each observation (using the previous observation, or mus_init)
        :param data: (T,D)
        :return: mus: (T, K, D)
        """
        T, D = data.shape
        assert D == self.D

        if T < self.lags:
            mus = self.mus_init * torch.ones(
                T, self.K, self.D, dtype=torch.float64)
        else:
            mus_rest = self.transformation.transform(
                data[:-1], **kwargs)  # (T - lags, K, D)
            assert mus_rest.shape == (T - self.lags, self.K, D)

            mus = torch.cat(
                (self.mus_init *
                 torch.ones(self.lags, self.K, self.D, dtype=torch.float64),
                 mus_rest))

        assert mus.shape == (T, self.K, self.D)
        return mus

    def log_prob(self, data, **kwargs):
        """

        :param data: shape (T, D)
        :return: log prob under each possible z_t: shape (T, K)
        """

        mus = self._compute_mus_for(data, **kwargs)  # (T, K, D)
        T = data.shape[0]

        p_init = TruncatedNormal(mus=mus[0],
                                 log_sigmas=self.log_sigmas_init,
                                 bounds=self.bounds)  # mus[0] (K, D)
        log_prob_init = p_init.log_prob(
            data[0])  # data[0] (D, ). log_prob_init: (K, D)
        log_prob_init = torch.sum(log_prob_init, dim=-1)  # (K, )

        if T == 1:
            return log_prob_init[None, ]

        dist = TruncatedNormal(mus=mus[1:],
                               log_sigmas=self.log_sigmas,
                               bounds=self.bounds)

        log_prob_ar = dist.log_prob(data[1:, None])  # (T-1, K, D)
        log_prob_ar = torch.sum(log_prob_ar, dim=-1)  # (T-1, K)

        return torch.cat((log_prob_init[None, ], log_prob_ar))

    def sample_x(self, z, xhist=None, with_noise=False, return_np=True):
        """

        :param z: ()
        :param xhist: (T_pre, D)
        :param return_np:
        :return: x: shape (D, )
        """

        # currently only support non-reparameterizable rejection sampling

        # no previous x
        if xhist is None or xhist.shape[0] < self.lags:
            mu = self.mus_init[z]  # (D,)
        else:
            # sample from the autoregressive distribution
            assert len(xhist.shape) == 2
            mu = self.transformation.transform_condition_on_z(
                z, xhist[-self.lags:])  # (D, )
            assert mu.shape == (self.D, )

        if with_noise:
            samples = mu
            # some ad-hoc way to address bound issue
            for d in range(self.D):
                if samples[d] <= self.bounds[d, 0]:
                    samples[d] = self.bounds[d, 0] + 0.1 * torch.rand(
                        1, dtype=torch.float64)
                elif samples[d] >= self.bounds[d, 1]:
                    samples[d] = self.bounds[d, 1] - 0.1 * torch.rand(
                        1, dtype=torch.float64)

            samples = samples.detach()

        else:
            dist = TruncatedNormal(mus=mu,
                                   log_sigmas=self.log_sigmas[z],
                                   bounds=self.bounds)
            samples = dist.sample()
        if return_np:
            return samples.numpy()
        return samples

    def rsample_x(self, z, xhist=None, with_noise=False):
        """
        generate reparameterized samples
        :param z: shape ()
        :param xhist: shape (T_pre, D)
        :return: x: shape (D,)
        """
        # TODO: add rejection sampling
        raise NotImplementedError
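
The docstrings above describe mapping an unconstrained value into per-dimension bounds with a sigmoid, x_tran = (upperbound - lowerbound) * sigmoid(h) + lowerbound. A standalone sketch of that mapping, using only torch and made-up bounds:

import torch

bounds_demo = torch.tensor([[0.0, 20.0], [0.0, 40.0]], dtype=torch.float64)  # (D, 2), made-up bounds
h = torch.randn(5, 2, dtype=torch.float64)  # unconstrained values, shape (T, D)
lower, upper = bounds_demo[:, 0], bounds_demo[:, 1]
x_tran = (upper - lower) * torch.sigmoid(h) + lower

# every squashed value lies strictly inside its [lower, upper] interval
assert torch.all(x_tran > lower) and torch.all(x_tran < upper)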
Example 6
        [3, 8])  # list of length 2, each item is an array (T, 2). T = 36000
    rendered_data.append(np.concatenate(
        (session_data), axis=1))  # each item is an array (T, 4)
trajectories = np.concatenate(rendered_data, axis=0)  # (T*30, 4)

traj29 = rendered_data[29]

arena_xmax = 320
arena_ymax = 370

bounds = np.array([[-10, arena_xmax + 10], [-10, arena_ymax + 10],
                   [-10, arena_xmax + 10], [-10, arena_ymax + 10]])

K = 2
D = 4
T = 36000

tran = LinearTransformation(K=K, D=D, lags=1)
observation = ARTruncatedNormalObservation(K=K,
                                           D=D,
                                           M=0,
                                           transformation=tran,
                                           bounds=bounds)

model = HMM(K=K, D=D, M=0, observation=observation)

data = torch.tensor(traj29[:5], dtype=torch.float64)

out = model.log_likelihood(data)
print(out)
Example 7
    rendered_data.append(np.concatenate(
        (session_data), axis=1))  # each item is an array (T, 4)
trajectories = np.concatenate(rendered_data, axis=0)  # (T*30, 4)

traj29 = rendered_data[29]

arena_xmax = 320
arena_ymax = 370

K = 2
D = 4
T = 36000

bounds = np.array([[-10, arena_xmax + 10], [-10, arena_ymax + 10],
                   [-10, arena_xmax + 10], [-10, arena_ymax + 10]])

#bounds = np.array([[-300, 300], [-300, 300], [-300, 300], [-300, 300]])

tran = LinearTransformation(K=K, D=D, lags=10, use_bias=True)
observation = ARLogitNormalObservation(K=K,
                                       D=D,
                                       M=0,
                                       transformation=tran,
                                       bounds=bounds,
                                       alpha=0.5)

model = HMM(K=K, D=D, M=0, observation=observation)

data = torch.tensor(traj29[:10000], dtype=torch.float64)

model.log_likelihood(data)
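
With the model defined, a trajectory can also be simulated from it. A short hedged sketch using the sample() signature that appears in the later examples on this page (return_np=False gives torch tensors):

z_sim, x_sim = model.sample(1000, return_np=False)  # 1000 simulated time steps
print(z_sim.shape, x_sim.shape)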
cmap = gradient_cmap(colors)

# generate synthetic data
K = 3
D = 2
T = 2
lags = 10

torch.manual_seed(0)
npr.seed(0)

bounds = np.array([[0, 20], [0, 40]])
thetas = np.linspace(0, 2 * np.pi, K, endpoint=False)
mus_init = 3 * np.column_stack((np.cos(thetas), np.sin(thetas))) + 5

true_tran = LinearTransformation(K=K, D=D, lags=lags)
true_observation = ARTruncatedNormalObservation(K=K,
                                                D=D,
                                                M=0,
                                                lags=lags,
                                                transformation=true_tran,
                                                bounds=bounds,
                                                mus_init=mus_init,
                                                train_sigma=False)
true_model = HMM(K=K, D=D, M=0, observation=true_observation)

z, x = true_model.sample(T, return_np=False)
true_ll = true_model.log_likelihood(x)

print(true_ll)
import torch
import numpy as np
import matplotlib.pyplot as plt

from tqdm import tqdm_notebook as tqdm
import sys

torch.manual_seed(0)

K = 3
D = 2
T = 100

As = [random_rotation(D) for _ in range(K)]
true_tran = LinearTransformation(K=K, d_in=D, D=D, As=As)

bounds = np.array([[0, 20], [-5, 25]])
true_observation = ARLogitNormalObservation(K=K,
                                            D=D,
                                            M=0,
                                            transformation=true_tran,
                                            bounds=bounds)

true_model = HMM(K=K, D=D, M=0, observation=true_observation)

z, data = true_model.sample(T, return_np=False)

# Define a model to fit the data

tran = LinearTransformation(K=K, d_in=D, D=D)
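
The snippet stops after constructing the transformation. A hedged sketch of the remaining fitting steps, mirroring the constructor calls used elsewhere on this page; the fit() arguments follow the commented-out call in Example 3 and are assumptions:

observation = ARLogitNormalObservation(K=K, D=D, M=0,
                                       transformation=tran, bounds=bounds)
model = HMM(K=K, D=D, M=0, observation=observation)

losses, optimizer = model.fit(data, method='adam', num_iters=2000, lr=0.001)
z_inferred = model.most_likely_states(data)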
class ARGaussianObservation(BaseObservation):
    """
    An autoregressive observation model with Gaussian emissions, one set of parameters per hidden state
    # TODO: subclassing ARObservation
    """
    def __init__(self,
                 K,
                 D,
                 M=0,
                 transformation='linear',
                 mus_init=None,
                 sigmas=None,
                 lags=1,
                 train_sigma=True):
        super(ARGaussianObservation, self).__init__(K, D, M)

        if mus_init is None:
            self.mus_init = torch.zeros(self.K, self.D, dtype=torch.float64)
        else:
            self.mus_init = check_and_convert_to_tensor(mus_init)

        # consider diagonal covariance
        self.log_sigmas_init = torch.tensor(np.log(np.ones((K, D))),
                                            dtype=torch.float64)

        if sigmas is None:
            self.log_sigmas = nn.Parameter(torch.tensor(np.log(5 * np.ones(
                (K, D))),
                                                        dtype=torch.float64),
                                           requires_grad=train_sigma)
        else:
            assert sigmas.shape == (self.K, self.D)
            assert np.all(np.asarray(sigmas) > 0), "sigmas must be positive"
            self.log_sigmas = nn.Parameter(torch.tensor(np.log(sigmas),
                                                        dtype=torch.float64),
                                           requires_grad=train_sigma)

        self.lags = lags

        if isinstance(transformation, str):
            if transformation == 'linear':
                self.transformation = LinearTransformation(K=self.K,
                                                           D=self.D,
                                                           lags=self.lags)
            else:
                raise ValueError("Unsupported transformation: {}".format(transformation))
        else:
            assert isinstance(transformation, BaseTransformation)
            self.transformation = transformation
            self.lags = self.transformation.lags

    def _get_scale_tril(self, log_sigmas):
        sigmas = torch.exp(log_sigmas)
        return torch.diag_embed(sigmas)

    def _compute_mus_based_on(self, data):
        pass

    def _compute_mus_for(self, data):
        """
        compute the mean vector for each observation (using the previous observation, or mus_init)
        :param data: (T,D)
        :return: mus: (T, K, D)
        """

        T, D = data.shape
        assert D == self.D

        if T < self.lags:
            mus = self.mus_init * torch.ones(
                T, self.K, self.D, dtype=torch.float64)
        else:
            mus_rest = self.transformation.transform(
                data[:-1])  # (T - lags, K, D)
            assert mus_rest.shape == (T - self.lags, self.K, D)

            # prepend mus_init for the first `lags` time steps
            mus = torch.cat(
                (self.mus_init *
                 torch.ones(self.lags, self.K, self.D, dtype=torch.float64),
                 mus_rest))

        assert mus.shape == (T, self.K, self.D)
        return mus

    def log_prob(self, data, **kwargs):
        """

        :param data: shape (T, D)
        :return: log prob under each possible z_t: shape (T, K)
        """

        mus = self._compute_mus_for(data)  # (T, K, D)
        T = mus.shape[0]

        p_init = Normal(mus[0],
                        torch.exp(self.log_sigmas_init))  # mus[0] (K, D)
        log_prob_init = p_init.log_prob(
            data[0])  # data[0] (D, ). log_prob_init: (K, D)
        log_prob_init = torch.sum(log_prob_init, dim=-1)  # (K, )

        if T == 1:
            return log_prob_init[None, ]

        p = Normal(mus[1:], torch.exp(self.log_sigmas))

        log_prob_ar = p.log_prob(data[1:, None])  # (T-1, K, D)
        log_prob_ar = torch.sum(log_prob_ar, dim=-1)  # (T-1, K)

        return torch.cat((log_prob_init[None, ], log_prob_ar))

    def rsample_x(self, z, xhist=None, with_noise=False, **kwargs):
        """
        generate reparameterized samples
        :param z: shape ()
        :param xhist: shape (T_pre, D)
        :return: x: shape (D,)
        """
        # TODO: test momentum_lags
        # no previous x
        if xhist is None or xhist.shape[0] < self.lags:
            mu = self.mus_init[z]  # (D,)
            sigmas_z = torch.exp(self.log_sigmas_init[z])  # (D,)
        else:
            # sample from the autoregressive distribution
            assert len(xhist.shape) == 2
            mu = self.transformation.transform_condition_on_z(
                z, xhist[-self.lags:])  # (D, )
            sigmas_z = torch.exp(self.log_sigmas[z])  # (D,)

        if with_noise:
            return mu

        out = mu + sigmas_z * torch.randn(self.D,
                                          dtype=torch.float64)  # (self.D, )

        return out
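
_get_scale_tril above turns the per-dimension sigmas into a diagonal scale matrix. A standalone sketch (standard torch.distributions only) showing that summing independent Normal log-probs over dimensions matches a MultivariateNormal built from that diagonal scale, which is the "diagonal covariance" convention used in log_prob above:

import torch
from torch.distributions import Normal, MultivariateNormal

torch.manual_seed(0)
mu = torch.randn(3, 2, dtype=torch.float64)          # (K, D) means, made-up values
log_sigmas = torch.randn(3, 2, dtype=torch.float64)  # (K, D) log standard deviations
x = torch.randn(2, dtype=torch.float64)              # a single observation of shape (D,)

# per-dimension Normal log-probs, summed over D (as in log_prob above)
lp_indep = Normal(mu, torch.exp(log_sigmas)).log_prob(x).sum(dim=-1)  # (K,)

# equivalent MultivariateNormal with the diagonal scale_tril from _get_scale_tril
mvn = MultivariateNormal(mu, scale_tril=torch.diag_embed(torch.exp(log_sigmas)))
lp_mvn = mvn.log_prob(x)  # (K,)

assert torch.allclose(lp_indep, lp_mvn)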