# The snippets below are excerpted from TensorFlow Probability (TFP). The
# aliases `tf`, `tfd` (tfp.distributions), `tfb` (tfp.bijectors), `tfl`
# (tf.linalg), and the TFP-internal helpers (`sts_util`, `seasonality_util`,
# `ps`/`prefer_static`, `gibbs_sampler`, `samplers`, `test_util`) are assumed
# to be imported by each snippet's enclosing module.
def _fit_seasonal_model_with_gibbs_sampling(observed_time_series,
                                            seasonal_structure,
                                            num_warmup_steps=50,
                                            num_results=100,
                                            seed=None):
    """Builds a seasonality-as-regression model and fits it by Gibbs sampling."""
    with tf.name_scope('fit_seasonal_model_with_gibbs_sampling'):
        observed_time_series = sts_util.canonicalize_observed_time_series_with_mask(
            observed_time_series)
        dtype = observed_time_series.time_series.dtype
        design_matrix = seasonality_util.build_fixed_effects(
            num_steps=ps.shape(observed_time_series.time_series)[-2],
            seasonal_structure=seasonal_structure,
            dtype=dtype)

        # Default priors.
        # pylint: disable=protected-access
        one = tf.ones([], dtype=dtype)
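        # NOTE: `_upper_bound` is not part of the public `tfd.InverseGamma`
        # API. Setting it here is, we assume based on TFP's `sts_gibbs`
        # internals, a hint the Gibbs sampler reads when present in order to
        # truncate the variances it samples from these priors.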
        level_variance_prior = tfd.InverseGamma(concentration=16,
                                                scale=16. * 0.001**2 * one)
        level_variance_prior._upper_bound = one
        slope_variance_prior = tfd.InverseGamma(concentration=16,
                                                scale=16. * 0.05**2 * one)
        slope_variance_prior._upper_bound = 0.01 * one
        observation_noise_variance_prior = tfd.InverseGamma(concentration=0.05,
                                                            scale=0.05 * one)
        observation_noise_variance_prior._upper_bound = 1.2 * one
        # pylint: enable=protected-access

    model = gibbs_sampler.build_model_for_gibbs_fitting(
        observed_time_series=observed_time_series,
        design_matrix=design_matrix,
        weights_prior=tfd.Normal(loc=0., scale=one),
        level_variance_prior=level_variance_prior,
        slope_variance_prior=slope_variance_prior,
        observation_noise_variance_prior=observation_noise_variance_prior)
    return [
        model,
        gibbs_sampler.fit_with_gibbs_sampling(
            model,
            observed_time_series,
            num_results=num_results,
            num_warmup_steps=num_warmup_steps,
            seed=seed)
    ]
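# A quick, self-contained sanity check (public TFP API only) of what the
# default priors above imply: InverseGamma(16, 16 * 0.001**2) on the level
# variance concentrates the level *scale* (the square root of the variance)
# around 0.001.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

level_variance_prior = tfd.InverseGamma(concentration=16.,
                                        scale=16. * 0.001**2)
level_scales = tf.sqrt(level_variance_prior.sample(10000, seed=42))
print(tf.reduce_mean(level_scales))  # ~0.001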
Example #2
    def test_log_prob_matches_linear_gaussian_ssm(self):
        dim = 2
        batch_shape = [3, 1]
        seed, *model_seeds = samplers.split_seed(test_util.test_seed(), n=6)

        # Sample a random linear Gaussian process.
        prior_loc = self.evaluate(
            tfd.Normal(0., 1.).sample(batch_shape + [dim],
                                      seed=model_seeds[0]))
        prior_scale = self.evaluate(
            tfd.InverseGamma(1., 1.).sample(batch_shape + [dim],
                                            seed=model_seeds[1]))
        transition_matrix = self.evaluate(
            tfd.Normal(0., 1.).sample([dim, dim], seed=model_seeds[2]))
        transition_bias = self.evaluate(
            tfd.Normal(0., 1.).sample(batch_shape + [dim],
                                      seed=model_seeds[3]))
        transition_scale_tril = self.evaluate(
            tf.linalg.cholesky(
                tfd.WishartTriL(
                    df=dim,
                    scale_tril=tf.eye(dim)).sample(seed=model_seeds[4])))

        initial_state_prior = tfd.MultivariateNormalDiag(
            loc=prior_loc, scale_diag=prior_scale, name='initial_state_prior')

        lgssm = tfd.LinearGaussianStateSpaceModel(
            num_timesteps=7,
            transition_matrix=transition_matrix,
            transition_noise=tfd.MultivariateNormalTriL(
                loc=transition_bias, scale_tril=transition_scale_tril),
            # Trivial observation model to pass through the latent state.
            observation_matrix=tf.eye(dim),
            observation_noise=tfd.MultivariateNormalDiag(
                loc=tf.zeros(dim), scale_diag=tf.zeros(dim)),
            initial_state_prior=initial_state_prior)

        markov_chain = tfd.MarkovChain(
            initial_state_prior=initial_state_prior,
            transition_fn=lambda _, x: tfd.MultivariateNormalTriL(  # pylint: disable=g-long-lambda
                loc=tf.linalg.matvec(transition_matrix, x) + transition_bias,
                scale_tril=transition_scale_tril),
            num_steps=7)

        x = markov_chain.sample(5, seed=seed)
        self.assertAllClose(lgssm.log_prob(x),
                            markov_chain.log_prob(x),
                            rtol=1e-5)
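# A self-contained 1-D sketch (public TFP API only, illustrative constants) of
# the equivalence the test above exercises: with an identity observation
# matrix and zero observation noise, the LGSSM's density over observations
# coincides with the explicit Markov chain's density over latent states.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

prior = tfd.MultivariateNormalDiag(loc=[0.], scale_diag=[1.])
markov_chain = tfd.MarkovChain(
    initial_state_prior=prior,
    transition_fn=lambda _, x: tfd.MultivariateNormalDiag(
        loc=0.9 * x, scale_diag=[0.5]),
    num_steps=7)
lgssm = tfd.LinearGaussianStateSpaceModel(
    num_timesteps=7,
    transition_matrix=tf.constant([[0.9]]),
    transition_noise=tfd.MultivariateNormalDiag(loc=[0.], scale_diag=[0.5]),
    observation_matrix=tf.eye(1),
    observation_noise=tfd.MultivariateNormalDiag(loc=[0.], scale_diag=[0.]),
    initial_state_prior=prior)
x = markov_chain.sample(seed=42)  # shape [7, 1]
print(markov_chain.log_prob(x), lgssm.log_prob(x))  # agree up to numerics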
Example #3
def _resample_scale(prior_concentration, prior_scale,
                    observed_residuals, is_missing=None, seed=None):
  """Samples a scale parameter from its conditional posterior.

  We assume the conjugate InverseGamma->Normal model:

  ```
  scale ~ Sqrt(InverseGamma(prior_concentration, prior_scale))
  for i in [1, ..., num_observations]:
    x[i] ~ Normal(loc, scale)
  ```

  in which `loc` is known, and return a sample from `p(scale | x)`.

  Args:
    prior_concentration: Float `Tensor` concentration parameter of the
      InverseGamma prior distribution.
    prior_scale: Float `Tensor` scale parameter of the InverseGamma prior
      distribution.
    observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
      specifying the centered observations `(x - loc)`.
    is_missing: Optional `bool` `Tensor` of shape `[..., num_observations]`. A
      `True` value indicates that the corresponding observation is missing.
    seed: Optional `Python` `int` seed controlling the sampled value.
  Returns:
    sampled_scale: A `Tensor` sample from the posterior `p(scale | x)`.
  """
  num_observations = prefer_static.shape(observed_residuals)[-1]
  if is_missing is not None:
    # Treat missing observations as zero residuals, and don't count them
    # toward the effective number of observations.
    num_missing = tf.reduce_sum(tf.cast(is_missing, observed_residuals.dtype),
                                axis=-1)
    observed_residuals = tf.where(is_missing,
                                  tf.zeros_like(observed_residuals),
                                  observed_residuals)
    num_observations -= num_missing

  variance_posterior = tfd.InverseGamma(
      concentration=prior_concentration + num_observations / 2.,
      scale=prior_scale + tf.reduce_sum(
          tf.square(observed_residuals), axis=-1) / 2.)
  return tf.sqrt(variance_posterior.sample(seed=seed))
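# A worked, self-contained instance (public TFP API only) of the conjugate
# update `_resample_scale` performs: with an InverseGamma(a, b) prior on the
# variance and centered Normal observations, the posterior over the variance
# is InverseGamma(a + n/2, b + sum(residuals**2) / 2). The hyperparameters
# and residuals here are illustrative.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

observed_residuals = tf.constant([0.3, -0.5, 0.1, 0.7])
a, b = 0.05, 0.05
variance_posterior = tfd.InverseGamma(
    concentration=a + tf.cast(tf.size(observed_residuals), tf.float32) / 2.,
    scale=b + tf.reduce_sum(tf.square(observed_residuals)) / 2.)
sampled_scale = tf.sqrt(variance_posterior.sample(seed=42))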
Example #4
  def __init__(self,
               design_matrix,
               weights_prior_scale=0.1,
               weights_batch_shape=None,
               name=None):
    """Specify a sparse linear regression model.

    Args:
      design_matrix: float `Tensor` of shape `concat([batch_shape,
        [num_timesteps, num_features]])`. This may also optionally be
        an instance of `tf.linalg.LinearOperator`.
      weights_prior_scale: float `Tensor` defining the scale of the Horseshoe
        prior on regression weights. Small values encourage the weights to be
        sparse. The shape must broadcast with `weights_batch_shape`.
        Default value: `0.1`.
      weights_batch_shape: if `None`, defaults to
        `design_matrix.batch_shape_tensor()`. Must broadcast with the batch
        shape of `design_matrix`.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'SparseLinearRegression'.
    """
    with tf.compat.v1.name_scope(
        name, 'SparseLinearRegression',
        values=[design_matrix, weights_prior_scale]) as name:

      if not isinstance(design_matrix, tfl.LinearOperator):
        design_matrix = tfl.LinearOperatorFullMatrix(
            tf.convert_to_tensor(value=design_matrix, name='design_matrix'),
            name='design_matrix_linop')

      if tf.compat.dimension_value(design_matrix.shape[-1]) is not None:
        num_features = design_matrix.shape[-1]
      else:
        num_features = design_matrix.shape_tensor()[-1]

      if weights_batch_shape is None:
        weights_batch_shape = design_matrix.batch_shape_tensor()
      else:
        weights_batch_shape = tf.convert_to_tensor(value=weights_batch_shape,
                                                   dtype=tf.int32)
      weights_shape = tf.concat([weights_batch_shape, [num_features]], axis=0)

      dtype = design_matrix.dtype

      self._design_matrix = design_matrix
      self._weights_prior_scale = weights_prior_scale

      ones_like_weights_batch = tf.ones(weights_batch_shape, dtype=dtype)
      ones_like_weights = tf.ones(weights_shape, dtype=dtype)
      super(SparseLinearRegression, self).__init__(
          parameters=[
              Parameter('global_scale_variance',
                        prior=tfd.InverseGamma(
                            0.5 * ones_like_weights_batch,
                            0.5 * ones_like_weights_batch),
                        bijector=tfb.Softplus()),
              Parameter('global_scale_noncentered',
                        prior=tfd.HalfNormal(
                            scale=ones_like_weights_batch),
                        bijector=tfb.Softplus()),
              Parameter('local_scale_variances',
                        prior=tfd.Independent(tfd.InverseGamma(
                            0.5 * ones_like_weights,
                            0.5 * ones_like_weights),
                                              reinterpreted_batch_ndims=1),
                        bijector=tfb.Softplus()),
              Parameter('local_scales_noncentered',
                        prior=tfd.Independent(tfd.HalfNormal(
                            scale=ones_like_weights),
                                              reinterpreted_batch_ndims=1),
                        bijector=tfb.Softplus()),
              Parameter('weights_noncentered',
                        prior=tfd.Independent(tfd.Normal(
                            loc=tf.zeros_like(ones_like_weights),
                            scale=ones_like_weights),
                                              reinterpreted_batch_ndims=1),
                        bijector=tfb.Identity())
          ],
          latent_size=0,
          name=name)
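# How the noncentered parameters above assemble into horseshoe-distributed
# weights: a HalfCauchy-distributed scale factors as
# HalfNormal * sqrt(InverseGamma(0.5, 0.5)), which is exactly the global/local
# decomposition parameterized above. The sketch below mirrors the model's
# `params_to_weights` recombination; the standalone function name is ours.
def _assemble_horseshoe_weights(global_scale_variance,
                                global_scale_noncentered,
                                local_scale_variances,
                                local_scales_noncentered,
                                weights_noncentered,
                                weights_prior_scale=0.1):
  global_scale = (global_scale_noncentered *
                  tf.sqrt(global_scale_variance) * weights_prior_scale)
  local_scales = local_scales_noncentered * tf.sqrt(local_scale_variances)
  return weights_noncentered * local_scales * global_scale[..., tf.newaxis]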