Example #1
    def testMatchWithAffineTransform(self):
        direct_bj = tfb.Tanh()
        indirect_bj = tfb.Chain([
            tfb.AffineScalar(shift=tf.cast(-1.0, dtype=tf.float64),
                             scale=tf.cast(2.0, dtype=tf.float64)),
            tfb.Sigmoid(),
            tfb.AffineScalar(scale=tf.cast(2.0, dtype=tf.float64))
        ])

        x = np.linspace(-3.0, 3.0, 100)
        y = np.tanh(x)
        self.assertAllClose(self.evaluate(direct_bj.forward(x)),
                            self.evaluate(indirect_bj.forward(x)))
        self.assertAllClose(self.evaluate(direct_bj.inverse(y)),
                            self.evaluate(indirect_bj.inverse(y)))
        self.assertAllClose(
            self.evaluate(direct_bj.inverse_log_det_jacobian(y,
                                                             event_ndims=0)),
            self.evaluate(
                indirect_bj.inverse_log_det_jacobian(y, event_ndims=0)))
        self.assertAllClose(
            self.evaluate(direct_bj.forward_log_det_jacobian(x,
                                                             event_ndims=0)),
            self.evaluate(
                indirect_bj.forward_log_det_jacobian(x, event_ndims=0)))
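A quick NumPy check of the identity this test relies on (a minimal sketch, independent of TFP): `tfb.Chain` applies its bijectors right to left, so the chain above computes `2 * sigmoid(2 * x) - 1`, which is exactly `tanh(x)`.

import numpy as np

x = np.linspace(-3.0, 3.0, 100)
sigmoid = lambda t: 1. / (1. + np.exp(-t))
# Chain([AffineScalar(shift=-1, scale=2), Sigmoid, AffineScalar(scale=2)]),
# applied right to left, is 2 * sigmoid(2 * x) - 1 == tanh(x).
np.testing.assert_allclose(np.tanh(x), 2. * sigmoid(2. * x) - 1., atol=1e-12)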
Example #2
 def testComposeFromChainBijector(self):
   x = tf.constant([-5., 0., 5.])
   sigmoid = functools.reduce(lambda chain, f: chain(f), [
       tfb.Reciprocal(),
       tfb.AffineScalar(shift=1.),
       tfb.Exp(),
       tfb.AffineScalar(scale=-1.),
   ])
   self.assertTrue(isinstance(sigmoid, tfb.Chain))
   self.assertAllClose(
       *self.evaluate([tf.nn.sigmoid(x), sigmoid.forward(x)]),
       atol=0, rtol=1e-3)
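Unrolling the composition by hand (a plain-NumPy sketch, not TFP code) shows why the reduced chain is the sigmoid; the rightmost bijector in the list is applied first.

import numpy as np

x = np.array([-5., 0., 5.])
y = -1. * x           # AffineScalar(scale=-1.)
y = np.exp(y)         # Exp()
y = 1. + y            # AffineScalar(shift=1.)
y = 1. / y            # Reciprocal()
np.testing.assert_allclose(y, 1. / (1. + np.exp(-x)))  # sigmoid(x)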
Example #3
 def testTinyScale(self):
     log_scale = tf.cast(-2000., self.dtype)
     x = tf.cast(1., self.dtype)
     scale = tf.exp(log_scale)
     fldj_linear = tfb.AffineScalar(scale=scale).forward_log_det_jacobian(
         x, event_ndims=0)
     fldj_log = tfb.AffineScalar(
         log_scale=log_scale).forward_log_det_jacobian(x, event_ndims=0)
     fldj_linear_, fldj_log_ = self.evaluate([fldj_linear, fldj_log])
     # Using the linear scale will saturate to 0, and produce bad log-det
     # Jacobians.
     self.assertNotEqual(fldj_linear_, fldj_log_)
     self.assertAllClose(-2000., fldj_log_)
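The failure mode being tested is easy to reproduce in plain NumPy: `exp(-2000.)` underflows to zero in float64, so the log-Jacobian cannot be recovered from the linear `scale`, while `log_scale` carries it exactly.

import numpy as np

log_scale = np.float64(-2000.)
scale = np.exp(log_scale)   # underflows to 0.0
print(scale)                # 0.0
print(np.log(scale))        # -inf (with a divide-by-zero warning)
print(log_scale)            # -2000.0, the correct forward log-det Jacobian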
Example #4
 def testCdfDescendingChained(self):
   bij1 = tfb.AffineScalar(shift=1., scale=[1., -2.])
   bij2 = tfb.AffineScalar(shift=1., scale=[[3.], [-5.]])
   bij3 = tfb.AffineScalar(shift=1., scale=[[[7.]], [[-11.]]])
   for chain in bij2(bij1), bij3(bij2(bij1)):
     td = self._cls()(
         distribution=tfd.Normal(loc=0., scale=tf.ones([2, 2, 2])),
         bijector=chain,
         validate_args=True)
     nd = tfd.Normal(loc=1., scale=3., validate_args=True)
     self.assertAllEqual(tf.ones(td.batch_shape, dtype=tf.bool),
                         td.cdf(nd.quantile(.4)) < td.cdf(nd.quantile(.6)),
                         msg=chain.name)
Example #5
    def test_end_to_end_works_correctly(self):
        true_mean = self.dtype([0, 0])
        true_cov = self.dtype([[1, 0.5], [0.5, 1]])
        num_results = 2000

        def target_log_prob(x, y):
            # Corresponds to unnormalized MVN.
            # z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
            z = tf.stack([x, y], axis=-1) - true_mean
            z = tf.squeeze(tf.linalg.triangular_solve(
                np.linalg.cholesky(true_cov), z[..., tf.newaxis]),
                           axis=-1)
            return -0.5 * tf.reduce_sum(z**2., axis=-1)

        transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
                target_log_prob_fn=tf.function(target_log_prob,
                                               autograph=False),
                # Affine scaling means we have to change the step_size
                # in order to get 60% acceptance, as was done in mcmc/hmc_test.py.
                step_size=[1.23 / 0.75, 1.23 / 0.5],
                num_leapfrog_steps=2,
                seed=_maybe_seed(54)),
            bijector=[
                tfb.AffineScalar(scale=0.75),
                tfb.AffineScalar(scale=0.5),
            ])
        # Recall, tfp.mcmc.sample_chain calls
        # transformed_hmc.bootstrap_results too.
        states, kernel_results = tfp.mcmc.sample_chain(
            num_results=num_results,
            # The initial state is used by inner_kernel.bootstrap_results.
            # Note the input is *after* `bijector.forward`.
            current_state=[self.dtype(-2), self.dtype(2)],
            kernel=transformed_hmc,
            num_burnin_steps=200,
            num_steps_between_results=1,
            parallel_iterations=1)
        states = tf.stack(states, axis=-1)
        self.assertEqual(num_results,
                         tf.compat.dimension_value(states.shape[0]))
        sample_mean = tf.reduce_mean(states, axis=0)
        x = states - sample_mean
        sample_cov = tf.matmul(x, x,
                               transpose_a=True) / self.dtype(num_results)
        [sample_mean_, sample_cov_, is_accepted_] = self.evaluate([
            sample_mean, sample_cov, kernel_results.inner_results.is_accepted
        ])
        self.assertNear(0.6, is_accepted_.mean(), err=0.05)
        self.assertAllClose(true_mean, sample_mean_, atol=0.06, rtol=0.)
        self.assertAllClose(true_cov, sample_cov_, atol=0., rtol=0.16)
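Conceptually, `TransformedTransitionKernel` runs the inner kernel on the unconstrained variable `z = bijector.inverse(x)` and targets `target_log_prob(bijector.forward(z))` plus the forward log-det Jacobian. A minimal NumPy sketch of that corrected target for the scalar scalings above (hypothetical helper, not TFP API):

import numpy as np

def scaled_target_log_prob(target_log_prob_fn, scales, z_parts):
  # x_i = scale_i * z_i; the correction is sum_i log|scale_i|, the forward
  # log-det Jacobian of the elementwise scaling.
  x_parts = [s * z for s, z in zip(scales, z_parts)]
  fldj = sum(np.log(np.abs(s)) for s in scales)
  return target_log_prob_fn(*x_parts) + fldj

# e.g. scaled_target_log_prob(target_log_prob, [0.75, 0.5], [z0, z1])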
Example #6
def _build_inference_bijector(parameter):
    """Return a scaling-and-support bijector for inference.

  By default, this is just `parameter.bijector`, which transforms a real-valued
  input to the parameter's support.

  For scale parameters (heuristically detected as any param with a Softplus
  support bijector), we also rescale by the prior stddev. This is
  approximately equivalent to performing inference on a standardized input
  `observed_time_series/stddev(observed_time_series)`, because:
   a) rescaling all the scale parameters is equivalent to rescaling the
      `observed_time_series` (it yields equivalent forecasts, etc.).
   b) the default scale priors in STS components have stddev proportional to
     `stddev(observed_time_series)`.

  Args:
    parameter: `sts.Parameter` named tuple instance.
  Returns:
    bijector: a `tfb.Bijector` instance to use in inference.
  """
    if isinstance(parameter.bijector, tfb.Softplus):
        try:
            # Use mean + stddev, rather than just stddev, to ensure a reasonable
            # init if the user passes a crazy custom prior like N(100000, 0.001).
            prior_scale = tf.abs(
                parameter.prior.mean()) + parameter.prior.stddev()
            return tfb.Chain(
                [tfb.AffineScalar(scale=prior_scale), parameter.bijector])
        except NotImplementedError:  # Custom prior with no mean and/or stddev.
            pass
    return parameter.bijector
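A hypothetical invocation of the helper above (a sketch; `Parameter` here is a local stand-in for the `(name, prior, bijector)` namedtuple used by `tfp.sts`):

import collections
import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

# Assumed stand-in for `sts.Parameter`.
Parameter = collections.namedtuple('Parameter', ['name', 'prior', 'bijector'])

param = Parameter('level_scale', tfd.LogNormal(loc=0., scale=2.), tfb.Softplus())
bijector = _build_inference_bijector(param)
# Softplus-constrained scale parameter, so `bijector` is a Chain that maps
# reals to positives and then rescales by mean + stddev of the prior.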
Example #7
 def testScalarCongruencyLogScale(self):
     bijector = tfb.AffineScalar(shift=self.dtype(3.6),
                                 log_scale=self.dtype(np.log(0.42)))
     bijector_test_util.assert_scalar_congruency(bijector,
                                                 lower_x=self.dtype(-2.),
                                                 upper_x=self.dtype(2.),
                                                 eval_func=self.evaluate)
Example #8
    def testValuesAreCorrectScalarTransform(self, feature_ndims, dims):
        amplitude = self.dtype(5.)
        length_scale = self.dtype(0.2)
        kernel = tfpk.ExponentiatedQuadratic(amplitude, length_scale,
                                             feature_ndims)
        input_shape = [dims] * feature_ndims

        bij = bijectors.AffineScalar(self.dtype(0.), self.dtype(2.))

        # Flat multiplication by 2.
        def scale_transform(x, feature_ndims, param_expansion_ndims):
            del feature_ndims, param_expansion_ndims
            return bij.forward(x)

        scale_transformed_kernel = tfpk.FeatureTransformed(
            kernel, transformation_fn=scale_transform)

        x = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
        y = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
        self.assertAllClose(
            _numpy_exp_quad(amplitude,
                            length_scale,
                            2. * x,
                            2. * y,
                            feature_ndims=feature_ndims),
            self.evaluate(scale_transformed_kernel.apply(x, y)))
Example #9
 def testCdfDescending(self):
     td = self._cls()(distribution=tfd.Normal(loc=0., scale=[1., 1.]),
                      bijector=tfb.AffineScalar(shift=1., scale=[2., -2.]),
                      validate_args=True)
     nd = tfd.Normal(loc=1., scale=2., validate_args=True)
     self.assertAllEqual(tf.ones(td.batch_shape, dtype=tf.bool),
                         td.cdf(nd.quantile(.8)) < td.cdf(nd.quantile(.9)))
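Why the assertion holds even for the batch member with a negative scale: for a decreasing map `y = 1 - 2x` with `x ~ Normal(0, 1)`, the transformed cdf is `F_Y(y) = 1 - F_X((1 - y) / 2)`, which is still increasing in `y`. A scipy sketch:

import numpy as np
from scipy import stats

y = np.linspace(-3., 3., 7)
cdf = 1. - stats.norm.cdf((1. - y) / 2.)   # F_Y for y = 1 - 2x, x ~ N(0, 1)
assert np.all(np.diff(cdf) > 0.)           # monotonically increasing in y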
Example #10
 def testModifiedVariableScaleAssertion(self):
     v = tf.Variable(1.)
     self.evaluate(v.initializer)
     b = tfb.AffineScalar(scale=v, validate_args=True)
     with self.assertRaisesOpError("Argument `scale` must be non-zero"):
         with tf.control_dependencies([v.assign(0.)]):
             _ = self.evaluate(b.forward(1.))
Example #11
  def testVariableGradients(self):
    b = tfb.AffineScalar(
        shift=tf.Variable(1.),
        scale=tf.Variable(2.))

    with tf.GradientTape() as tape:
      y = b.forward(.1)
    self.assertAllNotNone(tape.gradient(y, [b.shift, b.scale]))
Example #12
 def testScalarBatchScalarEventIdentityScale(self):
   exp2 = self._cls()(
       tfd.Exponential(rate=0.25), bijector=tfb.AffineScalar(scale=2.))
   log_prob = exp2.log_prob(1.)
   log_prob_ = self.evaluate(log_prob)
   base_log_prob = -0.5 * 0.25 + np.log(0.25)
   ildj = np.log(2.)
   self.assertAllClose(base_log_prob - ildj, log_prob_, rtol=1e-6, atol=0.)
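The expected value follows from the change of variables `y = 2x`: `log p_Y(1.) = log p_X(0.5) - log 2`, and `Exponential(rate).log_prob(x) = log(rate) - rate * x`, which is exactly what the test assembles. As arithmetic (a sketch):

import numpy as np

rate = 0.25
expected = (np.log(rate) - rate * 0.5) - np.log(2.)  # == base_log_prob - ildj
print(expected)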
Example #13
 def testComposeFromTransformedDistribution(self):
   actual_log_normal = tfb.Exp()(tfd.TransformedDistribution(
       distribution=tfd.Normal(0, 1),
       bijector=tfb.AffineScalar(shift=0.5, scale=2.)))
   expected_log_normal = tfd.LogNormal(0.5, 2.)
   x = tf.constant([0.1, 1., 5.])
   self.assertAllClose(
       *self.evaluate([actual_log_normal.log_prob(x),
                       expected_log_normal.log_prob(x)]),
       atol=0, rtol=1e-3)
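The equivalence holds because `0.5 + 2 * Z` with `Z ~ Normal(0, 1)` is `Normal(0.5, 2)`, so its exponential is `LogNormal(0.5, 2)`. A quick Monte Carlo sanity check (a sketch):

import numpy as np

z = np.random.randn(100000)
samples = np.exp(0.5 + 2. * z)   # draws from the transformed distribution
print(np.log(samples).mean(), np.log(samples).std())  # approx. 0.5 and 2.0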
Example #14
    def _bijector_fn(x):
        if tensorshape_util.rank(x.shape) == 1:
            x = x[tf.newaxis, ...]
            reshape_output = lambda x: x[0]
        else:
            reshape_output = lambda x: x

        shift, logit_gate = tf.unstack(layer(x), axis=-1)
        shift = reshape_output(shift)
        logit_gate = reshape_output(logit_gate)
        gate = tf.nn.sigmoid(logit_gate)
        return tfb.AffineScalar(shift=(1. - gate) * shift, scale=gate)
Example #15
    def _bijector_fn(x, output_units):
        if tensorshape_util.rank(x.shape) == 1:
            x = x[tf.newaxis, ...]
            reshape_output = lambda x: x[0]
        else:
            reshape_output = lambda x: x

        out = tf1.layers.dense(inputs=x, units=2 * output_units)
        shift, logit_gate = tf.split(out, 2, axis=-1)
        shift = reshape_output(shift)
        logit_gate = reshape_output(logit_gate)
        gate = tf.nn.sigmoid(logit_gate)
        return tfb.AffineScalar(shift=(1. - gate) * shift, scale=gate)
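Both variants return the same gated update `y = gate * x + (1 - gate) * shift`, a convex combination of the input and the predicted shift (as in highway networks); with `gate` in `(0, 1)` the map is always invertible. A scalar sketch:

import numpy as np

gate, shift, x = 0.3, 1.5, 2.0
y = gate * x + (1. - gate) * shift         # AffineScalar forward
x_rec = (y - (1. - gate) * shift) / gate   # AffineScalar inverse
assert np.isclose(x_rec, x)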
Example #16
  def test_copy_works(self):
    transformed = tfp.mcmc.TransformedTransitionKernel(
        inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
        bijector=tfb.AffineScalar(2.))

    transformed_copy = tfp.mcmc.TransformedTransitionKernel(
        **transformed.parameters)

    pkr, pkr_copy = self.evaluate([
        transformed.bootstrap_results(1.),
        transformed_copy.bootstrap_results(1.)
    ])

    self.assertAllClose(pkr.inner_results.target_log_prob,
                        pkr_copy.inner_results.target_log_prob)
Example #17
    def testMatchWithAffineTransform(self):
        with self.test_session():
            direct_bj = tfb.Tanh()
            indirect_bj = tfb.Chain([
                tfb.AffineScalar(shift=tf.to_double(-1.0),
                                 scale=tf.to_double(2.0)),
                tfb.Sigmoid(),
                tfb.AffineScalar(scale=tf.to_double(2.0))
            ])

            x = np.linspace(-3.0, 3.0, 100)
            y = np.tanh(x)
            self.assertAllClose(
                direct_bj.forward(x).eval(),
                indirect_bj.forward(x).eval())
            self.assertAllClose(
                direct_bj.inverse(y).eval(),
                indirect_bj.inverse(y).eval())
            self.assertAllClose(
                direct_bj.inverse_log_det_jacobian(y, event_ndims=0).eval(),
                indirect_bj.inverse_log_det_jacobian(y, event_ndims=0).eval())
            self.assertAllClose(
                direct_bj.forward_log_det_jacobian(x, event_ndims=0).eval(),
                indirect_bj.forward_log_det_jacobian(x, event_ndims=0).eval())
Example #18
  def testNoBatchScalar(self):
    def static_run(fun, x, **kwargs):
      return self.evaluate(fun(x, **kwargs))

    def dynamic_run(fun, x_value, **kwargs):
      x_value = np.array(x_value, dtype=self.dtype)
      x = tf1.placeholder_with_default(x_value, shape=None)
      return self.evaluate(fun(x, **kwargs))

    for run in (static_run, dynamic_run):
      bijector = tfb.AffineScalar(shift=self.dtype(-1.), scale=self.dtype(2.))
      x = self.dtype([1., 2, 3])  # Three scalar samples (no batches).
      self.assertAllClose([1., 3, 5], run(bijector.forward, x))
      self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
      self.assertAllClose(
          -np.log(2.),
          run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
Example #19
    def testOneBatchScalarViaIdentityUserProvidesShiftOnly(self):
        def static_run(fun, x, **kwargs):
            return self.evaluate(fun(x, **kwargs))

        def dynamic_run(fun, x_value, **kwargs):
            x_value = np.array(x_value, dtype=self.dtype)
            x = tf1.placeholder_with_default(x_value, shape=None)
            return self.evaluate(fun(x, **kwargs))

        for run in (static_run, dynamic_run):
            # Batched shift
            bijector = tfb.AffineScalar(shift=self.dtype([1.]))
            x = self.dtype([1.])  # One sample from one batch.
            self.assertAllClose([2.], run(bijector.forward, x))
            self.assertAllClose([0.], run(bijector.inverse, x))
            self.assertAllClose(
                0., run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
Example #20
    def testTwoBatchScalarIdentityViaIdentity(self):
        def static_run(fun, x, **kwargs):
            return self.evaluate(fun(x, **kwargs))

        def dynamic_run(fun, x_value, **kwargs):
            x_value = np.array(x_value, dtype=self.dtype)
            x = tf1.placeholder_with_default(x_value, shape=None)
            return self.evaluate(fun(x, **kwargs))

        for run in (static_run, dynamic_run):
            # Batch of 2 shifts
            bijector = tfb.AffineScalar(shift=self.dtype([1., -1]))
            x = self.dtype([1., 1])  # One sample from each of two batches.
            self.assertAllClose([2., 0], run(bijector.forward, x))
            self.assertAllClose([0., 2], run(bijector.inverse, x))
            self.assertAllClose(
                0., run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
Example #21
 def testProperties(self):
     # When not specified, `scale` defaults to 1 (the identity scale).
     bijector = tfb.AffineScalar(shift=-1.)
     self.assertStartsWith(bijector.name, "affine_scalar")
Example #22
  def __init__(self,
               order,
               coefficients_prior=None,
               level_scale_prior=None,
               initial_state_prior=None,
               coefficient_constraining_bijector=None,
               observed_time_series=None,
               name=None):
    """Specify an autoregressive model.

    Args:
      order: scalar Python positive `int` specifying the number of past
        timesteps to regress on.
      coefficients_prior: optional `tfd.Distribution` instance specifying a
        prior on the `coefficients` parameter. If `None`, a default standard
        normal (`tfd.MultivariateNormalDiag(scale_diag=tf.ones([order]))`) prior
        is used.
        Default value: `None`.
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_state_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial state, corresponding to the values of the process
        at a set of size `order` of imagined timesteps before the initial step.
        If `None`, a heuristic default prior is constructed based on the
        provided `observed_time_series`.
        Default value: `None`.
      coefficient_constraining_bijector: optional `tfb.Bijector` instance
        representing a constraining mapping for the autoregressive coefficients.
        For example, `tfb.Tanh()` constrains the coefficients to lie in
        `(-1, 1)`, while `tfb.Softplus()` constrains them to be positive, and
        `tfb.Identity()` implies no constraint. If `None`, the default behavior
        constrains the coefficients to lie in `(-1, 1)` using a `Tanh` bijector.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'Autoregressive'.
    """
    with tf.name_scope(name or 'Autoregressive') as name:
      masked_time_series = None
      if observed_time_series is not None:
        masked_time_series = (
            sts_util.canonicalize_observed_time_series_with_mask(
                observed_time_series))

      dtype = dtype_util.common_dtype(
          [(masked_time_series.time_series
            if masked_time_series is not None else None),
           coefficients_prior,
           level_scale_prior,
           initial_state_prior], dtype_hint=tf.float32)

      if observed_time_series is not None:
        _, observed_stddev, observed_initial = sts_util.empirical_statistics(
            masked_time_series)
      else:
        observed_stddev, observed_initial = (
            tf.convert_to_tensor(value=1., dtype=dtype),
            tf.convert_to_tensor(value=0., dtype=dtype))
      batch_ones = tf.ones(tf.concat([
          tf.shape(observed_initial),  # Batch shape
          [order]], axis=0), dtype=dtype)

      # Heuristic default priors. Overriding these may dramatically
      # change inference performance and results.
      if coefficients_prior is None:
        coefficients_prior = tfd.MultivariateNormalDiag(
            scale_diag=batch_ones)
      if level_scale_prior is None:
        level_scale_prior = tfd.LogNormal(
            loc=tf.math.log(0.05 * observed_stddev), scale=3.)

      if (coefficients_prior.event_shape.is_fully_defined() and
          order != coefficients_prior.event_shape[0]):
        raise ValueError("Prior dimension {} doesn't match order {}.".format(
            coefficients_prior.event_shape[0], order))

      if initial_state_prior is None:
        initial_state_prior = tfd.MultivariateNormalDiag(
            loc=observed_initial[..., tf.newaxis] * batch_ones,
            scale_diag=(tf.abs(observed_initial) +
                        observed_stddev)[..., tf.newaxis] * batch_ones)

      self._order = order
      self._coefficients_prior = coefficients_prior
      self._level_scale_prior = level_scale_prior
      self._initial_state_prior = initial_state_prior

      if coefficient_constraining_bijector is None:
        coefficient_constraining_bijector = tfb.Tanh()
      super(Autoregressive, self).__init__(
          parameters=[
              Parameter('coefficients',
                        coefficients_prior,
                        coefficient_constraining_bijector),
              Parameter('level_scale', level_scale_prior,
                        tfb.Chain([tfb.AffineScalar(scale=observed_stddev),
                                   tfb.Softplus()]))
          ],
          latent_size=order,
          name=name)
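The `level_scale` constraining bijector above (the same `Chain` recurs in the STS components below) maps an unconstrained real `u` to a positive scale of the right order of magnitude, `observed_stddev * softplus(u)`. In plain NumPy (a sketch):

import numpy as np

def constrain_scale(u, observed_stddev):
  # Chain([AffineScalar(scale=observed_stddev), Softplus()]) applied to u.
  return observed_stddev * np.log1p(np.exp(u))

print(constrain_scale(0., observed_stddev=3.5))  # 3.5 * log(2) ~= 2.43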
Example #23
    def __init__(self,
                 level_scale_prior=None,
                 slope_mean_prior=None,
                 slope_scale_prior=None,
                 autoregressive_coef_prior=None,
                 initial_level_prior=None,
                 initial_slope_prior=None,
                 observed_time_series=None,
                 constrain_ar_coef_stationary=True,
                 constrain_ar_coef_positive=False,
                 name=None):
        """Specify a semi-local linear trend model.

    Args:
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      slope_mean_prior: optional `tfd.Distribution` instance specifying a prior
        on the `slope_mean` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      slope_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `slope_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      autoregressive_coef_prior: optional `tfd.Distribution` instance specifying
        a prior on the `autoregressive_coef` parameter. If `None`, the default
        prior is a standard `Normal(0., 1.)`. Note that the prior may be
        implicitly truncated by `constrain_ar_coef_stationary` and/or
        `constrain_ar_coef_positive`.
        Default value: `None`.
      initial_level_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial level. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_slope_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial slope. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      constrain_ar_coef_stationary: if `True`, perform inference using a
        parameterization that restricts `autoregressive_coef` to the interval
        `(-1, 1)`, or `(0, 1)` if `constrain_ar_coef_positive` is also `True`,
        corresponding to stationary processes. This implicitly truncates the
        support of `autoregressive_coef_prior`.
        Default value: `True`.
      constrain_ar_coef_positive: if `True`, perform inference using a
        parameterization that restricts `autoregressive_coef` to be positive,
        or in `(0, 1)` if `constrain_ar_coef_stationary` is also `True`. This
        will implicitly truncate the support of `autoregressive_coef_prior`.
        Default value: `False`.
      name: the name of this model component.
        Default value: 'SemiLocalLinearTrend'.
    """

        with tf1.name_scope(name,
                            'SemiLocalLinearTrend',
                            values=[observed_time_series]) as name:

            if observed_time_series is not None:
                _, observed_stddev, observed_initial = sts_util.empirical_statistics(
                    observed_time_series)
            else:
                observed_stddev, observed_initial = 1., 0.

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if level_scale_prior is None:
                level_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=2.)
            if slope_mean_prior is None:
                slope_mean_prior = tfd.Normal(loc=0., scale=observed_stddev)
            if slope_scale_prior is None:
                slope_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=2.)
            if autoregressive_coef_prior is None:
                autoregressive_coef_prior = tfd.Normal(
                    loc=0., scale=tf.ones_like(observed_initial))
            if initial_level_prior is None:
                initial_level_prior = tfd.Normal(
                    loc=observed_initial,
                    scale=tf.abs(observed_initial) + observed_stddev)
            if initial_slope_prior is None:
                initial_slope_prior = tfd.Normal(loc=0., scale=observed_stddev)

            self._initial_state_prior = tfd.MultivariateNormalDiag(
                loc=tf.stack(
                    [initial_level_prior.mean(),
                     initial_slope_prior.mean()],
                    axis=-1),
                scale_diag=tf.stack([
                    initial_level_prior.stddev(),
                    initial_slope_prior.stddev()
                ],
                                    axis=-1))

            # Constrain the support of the autoregressive coefficient.
            if constrain_ar_coef_stationary and constrain_ar_coef_positive:
                autoregressive_coef_bijector = tfb.Sigmoid(
                )  # support in (0, 1)
            elif constrain_ar_coef_positive:
                autoregressive_coef_bijector = tfb.Softplus(
                )  # support in (0, infty)
            elif constrain_ar_coef_stationary:
                autoregressive_coef_bijector = tfb.Tanh()  # support in (-1, 1)
            else:
                autoregressive_coef_bijector = tfb.Identity()  # unconstrained

            stddev_preconditioner = tfb.AffineScalar(scale=observed_stddev)
            scaled_softplus = tfb.Chain(
                [stddev_preconditioner, tfb.Softplus()])
            super(SemiLocalLinearTrend, self).__init__(parameters=[
                Parameter('level_scale', level_scale_prior, scaled_softplus),
                Parameter('slope_mean', slope_mean_prior,
                          stddev_preconditioner),
                Parameter('slope_scale', slope_scale_prior, scaled_softplus),
                Parameter('autoregressive_coef', autoregressive_coef_prior,
                          autoregressive_coef_bijector),
            ],
                                                       latent_size=2,
                                                       name=name)
Example #24
    def __init__(self,
                 num_seasons,
                 num_steps_per_season=1,
                 drift_scale_prior=None,
                 initial_effect_prior=None,
                 constrain_mean_effect_to_zero=True,
                 observed_time_series=None,
                 name=None):
        """Specify a seasonal effects model.

    Args:
      num_seasons: Scalar Python `int` number of seasons.
      num_steps_per_season: Python `int` number of steps in each
        season. This may be either a scalar (shape `[]`), in which case all
        seasons have the same length; a NumPy array of shape `[num_seasons]`,
        in which case seasons have different lengths that stay constant across
        cycles; or a NumPy array of shape `[num_cycles, num_seasons]`, in which
        case the length of each season may also vary from cycle to cycle
        (e.g., a four-year cycle with a leap day).
        Default value: 1.
      drift_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `drift_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_effect_prior: optional `tfd.Distribution` instance specifying a
        normal prior on the initial effect of each season. This may be either
        a scalar `tfd.Normal` prior, in which case it applies independently to
        every season, or it may be multivariate normal (e.g.,
        `tfd.MultivariateNormalDiag`) with event shape `[num_seasons]`, in
        which case it specifies a joint prior across all seasons. If `None`, a
        heuristic default prior is constructed based on the provided
        `observed_time_series`.
        Default value: `None`.
      constrain_mean_effect_to_zero: if `True`, use a model parameterization
        that constrains the mean effect across all seasons to be zero. This
        constraint is generally helpful in identifying the contributions of
        different model components and can lead to more interpretable
        posterior decompositions. It may be undesirable if you plan to directly
        examine the latent space of the underlying state space model.
        Default value: `True`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'Seasonal'.
    """

        with tf1.name_scope(name, 'Seasonal',
                            values=[observed_time_series]) as name:

            _, observed_stddev, observed_initial = (
                sts_util.empirical_statistics(observed_time_series)
                if observed_time_series is not None else (0., 1., 0.))

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if drift_scale_prior is None:
                drift_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=3.)
            if initial_effect_prior is None:
                initial_effect_prior = tfd.Normal(
                    loc=observed_initial,
                    scale=tf.abs(observed_initial) + observed_stddev)

            dtype = tf.debugging.assert_same_float_dtype(
                [drift_scale_prior, initial_effect_prior])

            if isinstance(initial_effect_prior, tfd.Normal):
                initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=tf.stack([initial_effect_prior.mean()] * num_seasons,
                                 axis=-1),
                    scale_diag=tf.stack([initial_effect_prior.stddev()] *
                                        num_seasons,
                                        axis=-1))
            else:
                initial_state_prior = initial_effect_prior

            if constrain_mean_effect_to_zero:
                # Transform the prior to the residual parameterization used by
                # `ConstrainedSeasonalStateSpaceModel`, imposing a zero-sum constraint.
                # This doesn't change the marginal prior on individual effects, but
                # does introduce dependence between the effects.
                (effects_to_residuals,
                 _) = build_effects_to_residuals_matrix(num_seasons,
                                                        dtype=dtype)
                effects_to_residuals_linop = tf.linalg.LinearOperatorFullMatrix(
                    effects_to_residuals
                )  # Use linop so that matmul broadcasts.
                initial_state_prior_loc = effects_to_residuals_linop.matvec(
                    initial_state_prior.mean())
                initial_state_prior_scale_linop = effects_to_residuals_linop.matmul(
                    initial_state_prior.scale)  # returns LinearOperator
                initial_state_prior = tfd.MultivariateNormalFullCovariance(
                    loc=initial_state_prior_loc,
                    covariance_matrix=initial_state_prior_scale_linop.matmul(
                        initial_state_prior_scale_linop.to_dense(),
                        adjoint_arg=True))

            self._constrain_mean_effect_to_zero = constrain_mean_effect_to_zero
            self._initial_state_prior = initial_state_prior
            self._num_seasons = num_seasons
            self._num_steps_per_season = num_steps_per_season

            super(Seasonal, self).__init__(
                parameters=[
                    Parameter(
                        'drift_scale', drift_scale_prior,
                        tfb.Chain([
                            tfb.AffineScalar(scale=observed_stddev),
                            tfb.Softplus()
                        ])),
                ],
                latent_size=(num_seasons -
                             1 if self.constrain_mean_effect_to_zero else
                             num_seasons),
                name=name)
Example #25
    def __init__(self,
                 level_scale_prior=None,
                 initial_level_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a local level model.

    Args:
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_level_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial level. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'LocalLevel'.
    """

        with tf.name_scope(name or 'LocalLevel') as name:

            dtype = dtype_util.common_dtype(
                [level_scale_prior, initial_level_prior])

            if observed_time_series is not None:
                _, observed_stddev, observed_initial = (
                    sts_util.empirical_statistics(observed_time_series))
            else:
                observed_stddev, observed_initial = (tf.convert_to_tensor(
                    value=1.,
                    dtype=dtype), tf.convert_to_tensor(value=0., dtype=dtype))

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if level_scale_prior is None:
                level_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .05 * observed_stddev),
                                                  scale=3.,
                                                  name='level_scale_prior')
            if initial_level_prior is None:
                self._initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=observed_initial[..., tf.newaxis],
                    scale_diag=(tf.abs(observed_initial) +
                                observed_stddev)[..., tf.newaxis],
                    name='initial_level_prior')
            else:
                self._initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=initial_level_prior.mean()[..., tf.newaxis],
                    scale_diag=initial_level_prior.stddev()[..., tf.newaxis])

            super(LocalLevel, self).__init__(parameters=[
                Parameter(
                    'level_scale', level_scale_prior,
                    tfb.Chain([
                        tfb.AffineScalar(scale=observed_stddev),
                        tfb.Softplus()
                    ])),
            ],
                                             latent_size=1,
                                             name=name)
Example #26
  def __init__(self,
               components,
               constant_offset=None,
               observation_noise_scale_prior=None,
               observed_time_series=None,
               name=None):
    """Specify a structural time series model representing a sum of components.

    Args:
      components: Python `list` of one or more StructuralTimeSeries instances.
        These must have unique names.
      constant_offset: optional `float` `Tensor` of shape broadcasting to
        `concat([batch_shape, [num_timesteps]])` specifying a constant value
        added to the sum of outputs from the component models.
        This allows the components to model the shifted series
        `observed_time_series - constant_offset`. If `None`, this is set to the
        mean of the provided `observed_time_series`.
        Default value: `None`.
      observation_noise_scale_prior: optional `tfd.Distribution` instance
        specifying a prior on `observation_noise_scale`. If `None`, a heuristic
        default prior is constructed based on the provided
        `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series. This is
        used to set the constant offset, if not provided, and to construct a
        default heuristic `observation_noise_scale_prior` if not provided. May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: Python `str` name of this model component; used as `name_scope`
        for ops created by this class.
        Default value: 'Sum'.

    Raises:
      ValueError: if components do not have unique names.
    """

    with tf.name_scope(name or 'Sum') as name:
      if observed_time_series is not None:
        observed_mean, observed_stddev, _ = (
            sts_util.empirical_statistics(observed_time_series))
      else:
        observed_mean, observed_stddev = 0., 1.

      if observation_noise_scale_prior is None:
        observation_noise_scale_prior = tfd.LogNormal(
            loc=tf.math.log(.01 * observed_stddev), scale=2.)

      dtype = dtype_util.common_dtype([constant_offset,
                                       observation_noise_scale_prior,
                                       observed_mean,
                                       observed_stddev])

      # Ensure that offsets have canonical shape `[..., num_timesteps]`.
      if constant_offset is None:
        constant_offset = tf.convert_to_tensor(
            observed_mean, dtype=dtype)[..., tf.newaxis]
      constant_offset *= tf.ones([1], dtype=dtype)

      # Check that components have unique names, to ensure that inherited
      # parameters will be assigned unique names.
      component_names = [c.name for c in components]
      if len(component_names) != len(set(component_names)):
        raise ValueError(
            'Components must have unique names: {}'.format(component_names))
      components_by_name = collections.OrderedDict(
          [(c.name, c) for c in components])

      # Build parameters list for the combined model, by inheriting parameters
      # from the component models in canonical order.
      parameters = [Parameter('observation_noise_scale',
                              observation_noise_scale_prior,
                              tfb.Chain([
                                  tfb.AffineScalar(scale=observed_stddev),
                                  tfb.Softplus()]))]
      for component in components:
        for parameter in component.parameters:
          parameters.append(Parameter(
              name='{}_{}'.format(component.name, parameter.name),
              prior=parameter.prior, bijector=parameter.bijector))

      self._components = components
      self._components_by_name = components_by_name
      self._constant_offset = constant_offset

      super(Sum, self).__init__(
          parameters=parameters,
          latent_size=sum(
              [component.latent_size for component in components]),
          name=name)
Example #27
def bijectors(draw,
              bijector_name=None,
              batch_shape=None,
              event_dim=None,
              enable_vars=False):
    """Strategy for drawing Bijectors.

  The emitted bijector may be a basic bijector or an `Invert` of a basic
  bijector, but not a compound like `Chain`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector_name: Optional Python `str`.  If given, the produced bijectors
      will all have this type.  If omitted, Hypothesis chooses one from
      the whitelist `TF2_FRIENDLY_BIJECTORS`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      bijector.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.

  Returns:
    bijectors: A strategy for drawing bijectors with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if bijector_name is None:
        bijector_name = draw(hps.sampled_from(TF2_FRIENDLY_BIJECTORS))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if bijector_name == 'Invert':
        underlying_name = draw(
            hps.sampled_from(sorted(set(TF2_FRIENDLY_BIJECTORS) - {'Invert'})))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars))
        return tfb.Invert(underlying, validate_args=True)
    if bijector_name == 'Inline':
        if enable_vars:
            scale = tf.Variable(1., name='scale')
        else:
            scale = 2.
        b = tfb.AffineScalar(scale=scale)

        inline = tfb.Inline(
            forward_fn=b.forward,
            inverse_fn=b.inverse,
            forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian(  # pylint: disable=g-long-lambda
                x,
                event_ndims=b.forward_min_event_ndims),
            forward_min_event_ndims=b.forward_min_event_ndims,
            is_constant_jacobian=b.is_constant_jacobian,
        )
        inline.b = b
        return inline
    if bijector_name == 'DiscreteCosineTransform':
        dct_type = draw(hps.integers(min_value=2, max_value=3))
        return tfb.DiscreteCosineTransform(validate_args=True,
                                           dct_type=dct_type)
    if bijector_name == 'PowerTransform':
        power = draw(hps.floats(min_value=0., max_value=10.))
        return tfb.PowerTransform(validate_args=True, power=power)

    bijector_params = draw(
        broadcasting_params(bijector_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    ctor = getattr(tfb, bijector_name)
    return ctor(validate_args=True, **bijector_params)
Example #28
    def __init__(self,
                 period,
                 frequency_multipliers,
                 allow_drift=True,
                 drift_scale_prior=None,
                 initial_state_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a smooth seasonal effects model.

    Args:
      period: positive scalar `float` `Tensor` giving the number of timesteps
        required for the longest cyclic effect to repeat.
      frequency_multipliers: One-dimensional `float` `Tensor` listing the
        frequencies (cyclic components) included in the model, as multipliers of
        the base/fundamental frequency `2. * pi / period`. Each component is
        specified by the number of times it repeats per period, and adds two
        latent dimensions to the model. A smooth seasonal model that can
        represent any periodic function is given by `frequency_multipliers = [1,
        2, ..., floor(period / 2)]`. However, it is often desirable to enforce a
        smoothness assumption (and reduce the computational burden) by dropping
        some of the higher frequencies.
      allow_drift: optional Python `bool` specifying whether the seasonal
        effects can drift over time.  Setting this to `False`
        removes the `drift_scale` parameter from the model. This is
        mathematically equivalent to
        `drift_scale_prior = tfd.Deterministic(0.)`, but removing drift
        directly is preferred because it avoids the use of a degenerate prior.
        Default value: `True`.
      drift_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `drift_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_state_prior: instance of `tfd.MultivariateNormal` representing
        the prior distribution on the latent states. Must have event shape
        `[2 * len(frequency_multipliers)]`. If `None`, a heuristic default prior
        is constructed based on the provided `observed_time_series`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'SmoothSeasonal'.

    """

        with tf.name_scope(name or 'SmoothSeasonal') as name:

            _, observed_stddev, observed_initial = (
                sts_util.empirical_statistics(observed_time_series)
                if observed_time_series is not None else (0., 1., 0.))

            latent_size = 2 * static_num_frequencies(frequency_multipliers)

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if drift_scale_prior is None:
                drift_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=3.)

            if initial_state_prior is None:
                initial_state_scale = (tf.abs(observed_initial) +
                                       observed_stddev)[..., tf.newaxis]
                ones = tf.ones([latent_size], dtype=drift_scale_prior.dtype)
                initial_state_prior = tfd.MultivariateNormalDiag(
                    scale_diag=initial_state_scale * ones)

            self._initial_state_prior = initial_state_prior
            self._period = period
            self._frequency_multipliers = frequency_multipliers

            parameters = []
            if allow_drift:
                parameters.append(
                    Parameter(
                        'drift_scale', drift_scale_prior,
                        tfb.Chain([
                            tfb.AffineScalar(scale=observed_stddev),
                            tfb.Softplus()
                        ])))
            self._allow_drift = allow_drift

            super(SmoothSeasonal, self).__init__(parameters=parameters,
                                                 latent_size=latent_size,
                                                 name=name)
Example #29
 def testImmutableScaleAssertion(self):
     with self.assertRaisesOpError("Argument `scale` must be non-zero"):
         b = tfb.AffineScalar(scale=0., validate_args=True)
         _ = self.evaluate(b.forward(1.))
Example #30
 def testVariableScaleAssertion(self):
     v = tf.Variable(0.)
     self.evaluate(v.initializer)
     with self.assertRaisesOpError("Argument `scale` must be non-zero"):
         b = tfb.AffineScalar(scale=v, validate_args=True)
         _ = self.evaluate(b.forward(1.))
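These assertions exist because `AffineScalar` must be invertible: the inverse divides by `scale`, so `scale == 0` would collapse the forward map to a constant. A scalar sketch of the round trip:

import numpy as np

shift, scale = 1., 2.
y = shift + scale * 2.5                       # forward
assert np.isclose((y - shift) / scale, 2.5)   # inverse; undefined if scale == 0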