Example 1
    def test_logprob(self):
        y = self._build_placeholder([1.0, 1.3, 1.9, 2.9, 2.1])

        ssm = LocalLevelStateSpaceModel(
            num_timesteps=5,
            level_scale=0.5,
            initial_state_prior=tfd.MultivariateNormalDiag(
                scale_diag=self._build_placeholder([1.])))

        lp = ssm.log_prob(y[..., np.newaxis])
        expected_lp = -6.5021
        self.assertAllClose(self.evaluate(lp), expected_lp)
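
The reference value -6.5021 can be reproduced by hand. A minimal sketch, assuming the default zero observation noise (no observation_noise_scale is passed above), so that y reduces to a pure Gaussian random walk: y[0] follows the initial state prior N(0, 1) and each increment y[t] - y[t-1] is N(0, level_scale**2) with level_scale = 0.5.

import numpy as np

y = np.array([1.0, 1.3, 1.9, 2.9, 2.1])
level_scale = 0.5

def normal_log_prob(x, mean, scale):
  # Log density of a univariate normal N(mean, scale**2).
  return -0.5 * np.log(2. * np.pi * scale**2) - (x - mean)**2 / (2. * scale**2)

# Initial state prior N(0, 1), then one random-walk step per remaining timestep.
lp = normal_log_prob(y[0], 0., 1.)
lp += np.sum(normal_log_prob(y[1:], y[:-1], level_scale))
print(np.round(lp, 4))  # -6.5021
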
Example 2
  def test_batch_shape(self):
    batch_shape = [4, 2]

    level_scale = self._build_placeholder(
        np.exp(np.random.randn(*batch_shape)))
    initial_state_prior = tfd.MultivariateNormalDiag(
        scale_diag=self._build_placeholder([1.]))

    ssm = LocalLevelStateSpaceModel(
        num_timesteps=10,
        level_scale=level_scale,
        initial_state_prior=initial_state_prior)
    self.assertAllEqual(self.evaluate(ssm.batch_shape_tensor()), batch_shape)

    y = ssm.sample()
    self.assertAllEqual(self.evaluate(tf.shape(y))[:-2], batch_shape)
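
For context on the final assertion: a linear Gaussian state space model's samples have shape batch_shape + [num_timesteps, observation_size], so dropping the last two dimensions recovers the batch shape. A standalone sketch, assuming eager TF2 and using tfp.sts.LocalLevelStateSpaceModel, the public counterpart of the class exercised by these tests:

import numpy as np
import tensorflow_probability as tfp

tfd = tfp.distributions

batch_shape = [4, 2]
ssm = tfp.sts.LocalLevelStateSpaceModel(
    num_timesteps=10,
    level_scale=np.exp(np.random.randn(*batch_shape)).astype(np.float32),
    initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1.]))
print(ssm.batch_shape_tensor())   # [4 2]
print(ssm.event_shape)            # (10, 1) == [num_timesteps, observation_size]
print(ssm.sample(seed=42).shape)  # (4, 2, 10, 1)
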
Example 3
  def test_joint_sample(self):
    seed = test_util.test_seed(sampler_type='stateless')
    batch_shape = [4, 2]

    level_scale = self._build_placeholder(2 * np.ones(batch_shape))
    observation_noise_scale = self._build_placeholder(1.)
    initial_state_prior = tfd.MultivariateNormalDiag(
        loc=self._build_placeholder([-3]),
        scale_diag=self._build_placeholder([1.]))

    ssm = LocalLevelStateSpaceModel(
        num_timesteps=10,
        level_scale=level_scale,
        observation_noise_scale=observation_noise_scale,
        initial_state_prior=initial_state_prior)

    num_samples = 10000
    sampled_latents, sampled_obs = ssm._joint_sample_n(n=num_samples,
                                                       seed=seed)
    latent_mean, obs_mean = ssm._joint_mean()
    latent_cov, obs_cov = ssm._joint_covariances()
    (sampled_latents_, sampled_obs_,
     latent_mean_, obs_mean_,
     latent_std_, obs_std_) = self.evaluate((sampled_latents, sampled_obs,
                                             latent_mean, obs_mean,
                                             tf.sqrt(latent_cov[..., 0]),
                                             tf.sqrt(obs_cov[..., 0])))

    # Instead of directly comparing means and stddevs, we normalize by stddev
    # to make the stderr constant.
    self.assertAllClose(np.mean(sampled_latents_, axis=0) / latent_std_,
                        latent_mean_ / latent_std_,
                        atol=4. / np.sqrt(num_samples))
    self.assertAllClose(np.mean(sampled_obs_, axis=0) / obs_std_,
                        obs_mean_ / obs_std_,
                        atol=4. / np.sqrt(num_samples))
    self.assertAllClose(np.std(sampled_latents_, axis=0) / latent_std_,
                        np.ones(latent_std_.shape, dtype=latent_std_.dtype),
                        atol=4. / np.sqrt(num_samples))
    self.assertAllClose(np.std(sampled_obs_, axis=0) / obs_std_,
                        np.ones(obs_std_.shape, dtype=obs_std_.dtype),
                        atol=4. / np.sqrt(num_samples))
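
A note on the tolerance used above: the Monte Carlo standard error of a sample mean is stddev / sqrt(n), so after dividing everything by the true stddev the error scale is 1 / sqrt(n) regardless of each entry's magnitude, and atol = 4. / np.sqrt(num_samples) is roughly a four-standard-error bound. A minimal numpy illustration (the names below are hypothetical, not part of the test):

import numpy as np

rng = np.random.default_rng(0)
num_samples, true_stddev = 10000, 7.3
samples = true_stddev * rng.standard_normal(num_samples)

# Normalizing by the true stddev makes the sampling error ~1/sqrt(n), so a
# 4/sqrt(n) tolerance corresponds to about four standard errors.
normalized_error = abs(np.mean(samples)) / true_stddev
print(normalized_error < 4. / np.sqrt(num_samples))  # True (w.p. ~0.9999)
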
Example 4
    def test_stats(self):
        # Build a model whose initial level prior is centered at 0 with unit scale.
        level_scale = self._build_placeholder(1.0)
        initial_state_prior = tfd.MultivariateNormalDiag(
            loc=self._build_placeholder([0.]),
            scale_diag=self._build_placeholder([1.]))

        ssm = LocalLevelStateSpaceModel(
            num_timesteps=10,
            level_scale=level_scale,
            initial_state_prior=initial_state_prior)

        # In expectation, the process is constant.
        mean = self.evaluate(ssm.mean())
        self.assertAllClose(mean, np.zeros(10)[:, np.newaxis])

        # Each random-walk step adds level_scale**2 to the marginal variance, so
        # Var(level[T]) = initial_variance + (T - 1) * level_scale**2 = T here.
        expected_variance = np.arange(1, 11)[:, np.newaxis]
        variance = self.evaluate(ssm.variance())
        self.assertAllClose(variance, expected_variance)
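
Worked check of that variance assertion under the parameters above (unit initial variance, level_scale = 1, zero observation noise), a sketch rather than part of the test:

import numpy as np

level_scale, initial_variance, num_timesteps = 1., 1., 10
expected_variance = initial_variance + np.arange(num_timesteps) * level_scale**2
print(expected_variance)  # [ 1.  2.  3.  4.  5.  6.  7.  8.  9. 10.]
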
    def testEqualsLocalLevel(self):
        # An AR1 process with coef 1 is just a random walk, equivalent to a local
        # level model. Test that both models define the same distribution
        # (log-prob).
        num_timesteps = 10
        observed_time_series = self._build_placeholder(
            np.random.randn(num_timesteps, 1))

        level_scale = self._build_placeholder(0.1)

        # We'll test an AR1 process, and also (just for kicks) that the trivial
        # embedding as an AR2 process gives the same model.
        coefficients_order1 = np.array([1.]).astype(self.dtype)
        coefficients_order2 = np.array([1., 0.]).astype(self.dtype)

        ar1_ssm = AutoregressiveStateSpaceModel(
            num_timesteps=num_timesteps,
            coefficients=coefficients_order1,
            level_scale=level_scale,
            initial_state_prior=tfd.MultivariateNormalDiag(
                scale_diag=[level_scale]))
        ar2_ssm = AutoregressiveStateSpaceModel(
            num_timesteps=num_timesteps,
            coefficients=coefficients_order2,
            level_scale=level_scale,
            initial_state_prior=tfd.MultivariateNormalDiag(
                scale_diag=[level_scale, 1.]))

        local_level_ssm = LocalLevelStateSpaceModel(
            num_timesteps=num_timesteps,
            level_scale=level_scale,
            initial_state_prior=tfd.MultivariateNormalDiag(
                scale_diag=[level_scale]))

        ar1_lp, ar2_lp, ll_lp = self.evaluate(
            (ar1_ssm.log_prob(observed_time_series),
             ar2_ssm.log_prob(observed_time_series),
             local_level_ssm.log_prob(observed_time_series)))
        self.assertAllClose(ar1_lp, ll_lp)
        self.assertAllClose(ar2_lp, ll_lp)
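
The equivalence being tested follows directly from the recursions: with a single coefficient equal to 1, the AR(1) transition level[t] = 1 * level[t-1] + noise[t] is exactly the local-level (random walk) transition, so the two state space models induce the same distribution over observations. A minimal numpy sketch of that identity (simulation only, independent of the TFP classes):

import numpy as np

rng = np.random.default_rng(0)
num_timesteps, level_scale = 10, 0.1
noise = level_scale * rng.standard_normal(num_timesteps)

ar1 = np.zeros(num_timesteps)
walk = np.zeros(num_timesteps)
ar1[0] = walk[0] = noise[0]  # same initial N(0, level_scale**2) draw for both
for t in range(1, num_timesteps):
  ar1[t] = 1. * ar1[t - 1] + noise[t]  # AR(1) with coefficient 1
  walk[t] = walk[t - 1] + noise[t]     # local level / random walk
print(np.allclose(ar1, walk))  # True
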