Code Example #1
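# Draws a short Gibbs chain (4 kept draws after 1 warmup step) over a masked series;
# `model` and `seed` are presumably defined in the enclosing test scope.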
def do_sampling(observed_time_series, is_missing):
  return gibbs_sampler.fit_with_gibbs_sampling(
      model,
      tfp.sts.MaskedTimeSeries(observed_time_series, is_missing),
      num_results=4,
      num_warmup_steps=1,
      seed=seed)
Code Example #2
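# Runs a longer chain (100 warmup steps + 100 kept draws) with a stateless test seed;
# `model` and `observed_time_series` come from the enclosing scope.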
def do_sampling():
  return gibbs_sampler.fit_with_gibbs_sampling(
      model,
      observed_time_series,
      num_results=100,
      num_warmup_steps=100,
      seed=test_util.test_seed(sampler_type='stateless'))
Code Example #3
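# Fits `dummy_model` to a masked series, keeping 30 draws after 10 warmup steps;
# `dummy_model` and `sample_seed` are presumably defined in the enclosing scope.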
def do_sampling_again(observed_time_series, is_missing):
  return gibbs_sampler.fit_with_gibbs_sampling(
      dummy_model,
      tfp.sts.MaskedTimeSeries(observed_time_series, is_missing),
      num_results=30,
      num_warmup_steps=10,
      seed=sample_seed)
Code Example #4
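    # Fits on the first `num_observed_steps` observations, then checks that the
    # one-step-predictive distribution spans observed + forecast steps and that
    # predictive uncertainty grows over the forecast horizon.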
    def test_forecasts_are_sane(self):
        seed = test_util.test_seed()
        num_observed_steps = 5
        num_forecast_steps = 3
        model, observed_time_series, is_missing = self._build_test_model(
            num_timesteps=num_observed_steps + num_forecast_steps,
            batch_shape=[3])

        samples = gibbs_sampler.fit_with_gibbs_sampling(
            model,
            tfp.sts.MaskedTimeSeries(
                observed_time_series[..., :num_observed_steps, tf.newaxis],
                is_missing[..., :num_observed_steps]),
            num_results=5,
            num_warmup_steps=10,
            seed=seed,
            compile_steps_with_xla=False)
        predictive_dist = gibbs_sampler.one_step_predictive(
            model,
            samples,
            num_forecast_steps=num_forecast_steps,
            thin_every=1)
        predictive_mean, predictive_stddev = self.evaluate(
            (predictive_dist.mean(), predictive_dist.stddev()))

        self.assertAllEqual(predictive_mean.shape,
                            [3, num_observed_steps + num_forecast_steps])
        self.assertAllEqual(predictive_stddev.shape,
                            [3, num_observed_steps + num_forecast_steps])

        # Uncertainty should increase over the forecast period.
        self.assertTrue(
            np.all(predictive_stddev[..., num_observed_steps + 1:] >
                   predictive_stddev[..., num_observed_steps:-1]))
Code Example #5
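# Same as Code Example #2, but also forwards the experimental dynamic-Cholesky flag;
# `model`, `observed_time_series`, and `use_dyanamic_cholesky` come from the
# enclosing scope.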
def do_sampling():
  return gibbs_sampler.fit_with_gibbs_sampling(
      model,
      observed_time_series,
      num_results=100,
      num_warmup_steps=100,
      seed=test_util.test_seed(sampler_type='stateless'),
      experimental_use_dynamic_cholesky=use_dyanamic_cholesky)
Code Example #6
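# Multi-chain fit on a masked prefix of the series; `model`, `num_chains`,
# `num_results`, and the other free names come from the enclosing scope.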
def do_sampling():
  return gibbs_sampler.fit_with_gibbs_sampling(
      model,
      tfp.sts.MaskedTimeSeries(
          observed_time_series[..., :num_observed_steps, tf.newaxis],
          is_missing[..., :num_observed_steps]),
      num_chains=num_chains,
      num_results=num_results,
      num_warmup_steps=100,
      seed=seed)
Code Example #7
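# Builds a 336-step test model and draws 500 samples after 100 warmup steps;
# `self` and `seed` come from the enclosing test method.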
def _run():
  model, observed_time_series, is_missing = self._build_test_model(
      num_timesteps=336, batch_shape=[])
  return gibbs_sampler.fit_with_gibbs_sampling(
      model,
      tfp.sts.MaskedTimeSeries(observed_time_series[..., tf.newaxis],
                               is_missing),
      num_results=500,
      num_warmup_steps=100,
      seed=seed)
Code Example #8
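  # Verifies that `fit_with_gibbs_sampling` rejects model structures it does not
  # support and that the error messages point at the offending component.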
  def test_invalid_model_raises_error(self):
    observed_time_series = tf.convert_to_tensor([1., 0., -1., 2.])
    bad_model = tfp.sts.Sum(
        [tfp.sts.LinearRegression(design_matrix=tf.ones([4, 2])),
         tfp.sts.LocalLevel(observed_time_series=observed_time_series),],
        observed_time_series=observed_time_series)

    with self.assertRaisesRegex(ValueError, 'does not support Gibbs sampling'):
      gibbs_sampler.fit_with_gibbs_sampling(
          bad_model, observed_time_series, seed=test_util.test_seed())

    bad_model.supports_gibbs_sampling = True
    with self.assertRaisesRegex(
        ValueError, 'Expected the first model component to be an instance of'):
      gibbs_sampler.fit_with_gibbs_sampling(
          bad_model, observed_time_series, seed=test_util.test_seed())

    bad_model_with_correct_params = tfp.sts.Sum([
        # A seasonal model with no drift has no parameters, so adding it
        # won't break the check for correct params.
        tfp.sts.Seasonal(num_seasons=2,
                         allow_drift=False,
                         observed_time_series=observed_time_series),
        tfp.sts.LocalLevel(observed_time_series=observed_time_series),
        tfp.sts.LinearRegression(design_matrix=tf.ones([5, 2]))])
    bad_model_with_correct_params.supports_gibbs_sampling = True

    with self.assertRaisesRegex(ValueError,
                                'Expected the first model component to be an '
                                'instance of `tfp.sts.LocalLevel`'):
      gibbs_sampler.fit_with_gibbs_sampling(bad_model_with_correct_params,
                                            observed_time_series,
                                            seed=test_util.test_seed())
Code Example #9
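    # Fits the model twice with the same seed (eager mode, optionally XLA-compiled)
    # and checks that the one-step-predictive means and stddevs match exactly.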
    def test_end_to_end_prediction_works_and_is_deterministic(
            self, dtype, use_xla):
        if not tf.executing_eagerly():
            return
        seed = test_util.test_seed()
        model, observed_time_series, is_missing = self._build_test_model(
            num_timesteps=5, batch_shape=[3])

        samples = gibbs_sampler.fit_with_gibbs_sampling(
            model,
            tfp.sts.MaskedTimeSeries(observed_time_series[..., tf.newaxis],
                                     is_missing),
            num_results=4,
            num_warmup_steps=1,
            seed=seed,
            compile_steps_with_xla=use_xla)
        predictive_dist = gibbs_sampler.one_step_predictive(model,
                                                            samples,
                                                            thin_every=1)

        # Test that the seeded calculation gives the same result on multiple runs.
        samples2 = gibbs_sampler.fit_with_gibbs_sampling(
            model,
            tfp.sts.MaskedTimeSeries(observed_time_series[..., tf.newaxis],
                                     is_missing),
            num_results=4,
            num_warmup_steps=1,
            seed=seed,
            compile_steps_with_xla=use_xla)
        predictive_dist2 = gibbs_sampler.one_step_predictive(model,
                                                             samples2,
                                                             thin_every=1)

        (predictive_mean_, predictive_stddev_, predictive_mean2_,
         predictive_stddev2_) = self.evaluate(
             (predictive_dist.mean(), predictive_dist.stddev(),
              predictive_dist2.mean(), predictive_dist2.stddev()))
        self.assertAllEqual(predictive_mean_, predictive_mean2_)
        self.assertAllEqual(predictive_stddev_, predictive_stddev2_)
Code Example #10
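# Helper that builds a seasonality-as-regression model with InverseGamma variance
# priors carrying private `_upper_bound` attributes (presumably a bound consumed by
# the Gibbs sampler) and fits it by Gibbs sampling.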
def _fit_seasonal_model_with_gibbs_sampling(observed_time_series,
                                            seasonal_structure,
                                            num_warmup_steps=50,
                                            num_results=100,
                                            seed=None):
    """Builds a seasonality-as-regression model and fits it by Gibbs sampling."""
    with tf.name_scope('fit_seasonal_model_with_gibbs_sampling'):
        observed_time_series = sts_util.canonicalize_observed_time_series_with_mask(
            observed_time_series)
        dtype = observed_time_series.time_series.dtype
        design_matrix = seasonality_util.build_fixed_effects(
            num_steps=ps.shape(observed_time_series.time_series)[-2],
            seasonal_structure=seasonal_structure,
            dtype=dtype)

        # Default priors.
        # pylint: disable=protected-access
        one = tf.ones([], dtype=dtype)
        level_variance_prior = tfd.InverseGamma(concentration=16,
                                                scale=16. * 0.001**2 * one)
        level_variance_prior._upper_bound = one
        slope_variance_prior = tfd.InverseGamma(concentration=16,
                                                scale=16. * 0.05**2 * one)
        slope_variance_prior._upper_bound = 0.01 * one
        observation_noise_variance_prior = tfd.InverseGamma(concentration=0.05,
                                                            scale=0.05 * one)
        observation_noise_variance_prior._upper_bound = 1.2 * one
        # pylint: enable=protected-access

    model = gibbs_sampler.build_model_for_gibbs_fitting(
        observed_time_series=observed_time_series,
        design_matrix=design_matrix,
        weights_prior=tfd.Normal(loc=0., scale=one),
        level_variance_prior=level_variance_prior,
        slope_variance_prior=slope_variance_prior,
        observation_noise_variance_prior=observation_noise_variance_prior)
    return [
        model,
        gibbs_sampler.fit_with_gibbs_sampling(
            model,
            observed_time_series,
            num_results=num_results,
            num_warmup_steps=num_warmup_steps,
            seed=seed)
    ]
Code Example #11
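    # Rough benchmark: times an XLA-compiled Gibbs run (100 warmup + 500 kept draws)
    # on a 336-step series; only runs in eager mode.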
    def test_benchmark_sampling_with_xla(self):
        if not tf.executing_eagerly():
            return
        seed = tfp_test_util.test_seed()
        model, observed_time_series, is_missing = self._build_test_model(
            num_timesteps=336, batch_shape=[])

        t0 = time.time()
        samples = gibbs_sampler.fit_with_gibbs_sampling(
            model,
            tfp.sts.MaskedTimeSeries(observed_time_series[..., tf.newaxis],
                                     is_missing),
            num_results=500,
            num_warmup_steps=100,
            seed=seed,
            compile_steps_with_xla=True)
        t1 = time.time()
        print('Drew (100+500) samples in time', t1 - t0)
        print('Results:', samples)
Code Example #12
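    # With many Gibbs draws (10000), the forecast portion of the one-step-predictive
    # distribution should approximately match `tfp.sts.forecast` given the same
    # parameter samples.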
    def test_forecasts_match_reference(self, use_slope):
        seed = test_util.test_seed()
        num_observed_steps = 5
        num_forecast_steps = 4
        model, observed_time_series, is_missing = self._build_test_model(
            num_timesteps=num_observed_steps + num_forecast_steps,
            true_slope_scale=0.5 if use_slope else None,
            batch_shape=[3])

        samples = tf.function(lambda: gibbs_sampler.fit_with_gibbs_sampling(  # pylint: disable=g-long-lambda
            model,
            tfp.sts.MaskedTimeSeries(
                observed_time_series[..., :num_observed_steps, tf.newaxis],
                is_missing[..., :num_observed_steps]),
            num_results=10000,
            num_warmup_steps=100,
            seed=seed))()
        predictive_dist = gibbs_sampler.one_step_predictive(
            model,
            samples,
            num_forecast_steps=num_forecast_steps,
            thin_every=1)
        predictive_mean, predictive_stddev = self.evaluate(
            (predictive_dist.mean(), predictive_dist.stddev()))
        self.assertAllEqual(predictive_mean.shape,
                            [3, num_observed_steps + num_forecast_steps])
        self.assertAllEqual(predictive_stddev.shape,
                            [3, num_observed_steps + num_forecast_steps])

        if use_slope:
            parameter_samples = (samples.observation_noise_scale,
                                 samples.level_scale, samples.slope_scale,
                                 samples.weights)
        else:
            parameter_samples = (samples.observation_noise_scale,
                                 samples.level_scale, samples.weights)

        # Note that although we expect the Gibbs-sampled forecasts to match a
        # reference implementation, we *don't* expect the one-step predictions to
        # match `tfp.sts.one_step_predictive`, because that makes predictions using
        # a filtered posterior (i.e., given only previous observations) whereas the
        # Gibbs-sampled latent `level`s will incorporate some information from
        # future observations.
        reference_forecast_dist = tfp.sts.forecast(
            model,
            observed_time_series=observed_time_series[
                ..., :num_observed_steps],
            parameter_samples=parameter_samples,
            num_steps_forecast=num_forecast_steps)

        reference_forecast_mean = self.evaluate(
            reference_forecast_dist.mean()[..., 0])
        reference_forecast_stddev = self.evaluate(
            reference_forecast_dist.stddev()[..., 0])

        self.assertAllClose(predictive_mean[..., -num_forecast_steps:],
                            reference_forecast_mean,
                            atol=1.0 if use_slope else 0.3)
        self.assertAllClose(predictive_stddev[..., -num_forecast_steps:],
                            reference_forecast_stddev,
                            atol=2.0 if use_slope else 1.0)