Code example #1
    def testLogNormalLogNormalKL(self):
        batch_size = 6
        mu_a = np.array([3.0] * batch_size)
        sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
        mu_b = np.array([-3.0] * batch_size)
        sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])

        ln_a = tfd.LogNormal(loc=mu_a, scale=sigma_a, validate_args=True)
        ln_b = tfd.LogNormal(loc=mu_b, scale=sigma_b, validate_args=True)

        kl = tfd.kl_divergence(ln_a, ln_b)
        kl_val = self.evaluate(kl)

        normal_a = tfd.Normal(loc=mu_a, scale=sigma_a, validate_args=True)
        normal_b = tfd.Normal(loc=mu_b, scale=sigma_b, validate_args=True)
        kl_expected_from_normal = tfd.kl_divergence(normal_a, normal_b)

        kl_expected_from_formula = (
            (mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 *
            ((sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))

        x = ln_a.sample(int(2e5), seed=test_util.test_seed())
        kl_sample = tf.reduce_mean(ln_a.log_prob(x) - ln_b.log_prob(x), axis=0)
        kl_sample_ = self.evaluate(kl_sample)

        self.assertEqual(kl.shape, (batch_size, ))
        self.assertAllClose(kl_val, kl_expected_from_normal)
        self.assertAllClose(kl_val, kl_expected_from_formula)
        self.assertAllClose(kl_expected_from_formula,
                            kl_sample_,
                            atol=0.0,
                            rtol=1e-2)
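Note on the first two assertions: `tfb.Exp` is a smooth bijection, and KL divergence is invariant under invertible transformations, so the KL between two LogNormals equals the KL between their underlying Normals:

\[
  D_{\mathrm{KL}}\big(\mathrm{LN}(\mu_a,\sigma_a)\,\|\,\mathrm{LN}(\mu_b,\sigma_b)\big)
    = \log\frac{\sigma_b}{\sigma_a}
      + \frac{\sigma_a^2 + (\mu_a-\mu_b)^2}{2\sigma_b^2}
      - \frac{1}{2},
\]

which rearranges to the `kl_expected_from_formula` expression in the test.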
Code example #2
 def new(params,
         event_shape,
         loc_activation,
         scale_activation,
         validate_args=False,
         name="LogNormalLayer"):
   """Create the distribution instance from a `params` vector."""
   params = tf.convert_to_tensor(value=params, name='params')
   event_shape = dist_util.expand_to_vector(
       tf.convert_to_tensor(value=event_shape,
                            name='event_shape',
                            dtype=tf.int32),
       tensor_name='event_shape',
   )
   output_shape = tf.concat(
       [tf.shape(input=params)[:-1], event_shape],
       axis=0,
   )
   loc_params, scale_params = tf.split(params, 2, axis=-1)
   loc_params = tf.reshape(loc_activation(loc_params), output_shape)
   scale_params = tf.reshape(scale_activation(scale_params), output_shape)
   return tfd.Independent(
       tfd.LogNormal(loc=loc_params,
                     scale=scale_params,
                     validate_args=validate_args),
       reinterpreted_batch_ndims=tf.size(input=event_shape),
       name=name,
   )
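A minimal usage sketch for the factory above (shapes hypothetical; assumes the snippet's `tf`, `tfd`, and `dist_util` imports are in scope). `params` packs `2 * prod(event_shape)` values per batch element; the first half becomes `loc`, the second half `scale`:

 params = tf.random.normal([8, 2 * 3])        # batch of 8, event_shape [3]
 dist = new(params,
            event_shape=[3],
            loc_activation=tf.identity,
            scale_activation=tf.nn.softplus)  # softplus keeps `scale` positive
 x = dist.sample()        # shape [8, 3]
 lp = dist.log_prob(x)    # shape [8]; event dims are reinterpreted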
Code example #3
 def _as_distribution(self, r):
     scale = DeferredTensor(self._scale,
                            lambda x: tf.cast(x, r.dtype),
                            dtype=r.dtype)
     return tfd.LogNormal(loc=DeferredTensor(
         r, lambda x: tf.math.log(tf.math.softplus(x)) - 0.5 * scale**2.),
                          scale=scale)
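The `- 0.5 * scale**2.` shift in `loc` is a mean-matching device: since `E[LogNormal(loc, scale)] = exp(loc + scale**2 / 2)`, choosing `loc = log(m) - scale**2 / 2` yields a LogNormal whose mean is exactly `m = softplus(r)`. A standalone check of the identity (values hypothetical):

 import tensorflow as tf
 import tensorflow_probability as tfp

 tfd = tfp.distributions

 m, s = 2.5, 0.7  # target mean and scale
 d = tfd.LogNormal(loc=tf.math.log(m) - 0.5 * s**2, scale=s)
 print(d.mean())  # exp(log(m) - s**2/2 + s**2/2) = m = 2.5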
Code example #4
 def new(params,
         event_shape=(),
         softplus_scale=True,
         validate_args=False,
         name=None):
     """Create the distribution instance from a `params` vector."""
     with tf.compat.v1.name_scope(name, 'LogNormal', [params, event_shape]):
         params = tf.convert_to_tensor(value=params, name='params')
         event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
             value=event_shape, name='event_shape', dtype=tf.int32),
                                                  tensor_name='event_shape')
         output_shape = tf.concat([
             tf.shape(input=params)[:-1],
             event_shape,
         ],
                                  axis=0)
         loc_params, scale_params = tf.split(params, 2, axis=-1)
         if softplus_scale:
             scale_params = tf.math.softplus(
                 scale_params) + tfd.softplus_inverse(1.0)
         return tfd.Independent(
             tfd.LogNormal(loc=tf.reshape(loc_params, output_shape),
                           scale=tf.reshape(scale_params, output_shape),
                           validate_args=validate_args),
             reinterpreted_batch_ndims=tf.size(input=event_shape),
             validate_args=validate_args)
Code example #5
 def testComposeFromNonTransformedDistribution(self):
   actual_log_normal = tfb.Exp()(tfd.Normal(0.5, 2.))
   expected_log_normal = tfd.LogNormal(0.5, 2.)
   x = tf.constant([0.1, 1., 5.])
   self.assertAllClose(
       *self.evaluate([actual_log_normal.log_prob(x),
                       expected_log_normal.log_prob(x)]),
       atol=0, rtol=1e-3)
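Calling a bijector on a distribution, as in `tfb.Exp()(tfd.Normal(0.5, 2.))`, constructs `tfd.TransformedDistribution(tfd.Normal(0.5, 2.), tfb.Exp())`, which is precisely the definition of `tfd.LogNormal(0.5, 2.)`; the two log-densities therefore agree up to floating-point error.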
Code example #6
 def testHandlesKwargs(self):
   x = tfb.Exp()(tfd.Normal(0, 1), event_shape=[4])
   y = tfd.Independent(tfd.LogNormal(tf.zeros(4), 1), 1)
   z = tf.constant([[1., 2, 3, 4],
                    [0.5, 1.5, 2., 2.5]])
   self.assertAllClose(
       *self.evaluate([y.log_prob(z), x.log_prob(z)]),
       atol=0, rtol=1e-3)
Code example #7
 def setUp(self):
     super(LogNormalSoftplusTest, self).setUp()
     self.dtype = np.float32
     s = 0.75
     self.model = tfp.glm.LogNormalSoftplus(s)
     self.expected = tfp.glm.CustomExponentialFamily(
         lambda mean: tfd.LogNormal(tf.math.log(mean) - 0.5 * s**2, s),
         tf.nn.softplus)
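This expected model relies on the same mean-matching identity as code example #3: the inverse link maps the linear response through `tf.nn.softplus` to a predicted mean, and shifting `loc` by `- 0.5 * s**2` makes the LogNormal's mean equal that prediction.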
Code example #8
 def testComposeFromTransformedDistribution(self):
   actual_log_normal = tfb.Exp()(tfd.TransformedDistribution(
       distribution=tfd.Normal(0, 1),
       bijector=tfb.AffineScalar(shift=0.5, scale=2.)))
   expected_log_normal = tfd.LogNormal(0.5, 2.)
   x = tf.constant([0.1, 1., 5.])
   self.assertAllClose(
       *self.evaluate([actual_log_normal.log_prob(x),
                       expected_log_normal.log_prob(x)]),
       atol=0, rtol=1e-3)
Code example #9
    def testLogNormalCDF(self):
        loc, scale = 1.5, 0.4
        dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)

        x = np.array([1e-4, 1.0, 2.0], dtype=np.float32)

        cdf = dist.cdf(x)
        analytical_cdf = .5 + .5 * tf.math.erf(
            (np.log(x) - loc) / (scale * np.sqrt(2)))
        self.assertAllClose(self.evaluate(cdf), self.evaluate(analytical_cdf))
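The analytical expression is the standard LogNormal CDF: if \(X \sim \mathrm{LogNormal}(\mu, \sigma)\), then \(\log X \sim \mathrm{Normal}(\mu, \sigma)\), so

\[
  F(x) = \Phi\!\left(\frac{\ln x - \mu}{\sigma}\right)
       = \frac{1}{2} + \frac{1}{2}
         \operatorname{erf}\!\left(\frac{\ln x - \mu}{\sigma\sqrt{2}}\right),
  \qquad x > 0.
\]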
Code example #10
 def testLogNormalSample(self):
     loc, scale = 1.5, 0.4
     dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)
     samples = self.evaluate(dist.sample(6000, seed=test_util.test_seed()))
     self.assertAllClose(np.mean(samples),
                         self.evaluate(dist.mean()),
                         atol=0.1)
     self.assertAllClose(np.std(samples),
                         self.evaluate(dist.stddev()),
                         atol=0.1)
Code example #11
  def test_broadcast_batch_shapes(self):
    seed = test_util.test_seed(sampler_type='stateless')

    batch_shape = [3, 1, 4]
    partial_batch_shape = [2, 1]
    expected_broadcast_batch_shape = [3, 2, 4]

    # Build a model where parameters have different batch shapes.
    partial_batch_loc = self._build_placeholder(
        np.random.randn(*partial_batch_shape))
    full_batch_loc = self._build_placeholder(
        np.random.randn(*batch_shape))

    partial_scale_prior = tfd.LogNormal(
        loc=partial_batch_loc, scale=tf.ones_like(partial_batch_loc))
    full_scale_prior = tfd.LogNormal(
        loc=full_batch_loc, scale=tf.ones_like(full_batch_loc))
    loc_prior = tfd.Normal(loc=partial_batch_loc,
                           scale=tf.ones_like(partial_batch_loc))

    linear_trend = LocalLinearTrend(level_scale_prior=full_scale_prior,
                                    slope_scale_prior=full_scale_prior,
                                    initial_level_prior=loc_prior,
                                    initial_slope_prior=loc_prior)
    seasonal = Seasonal(num_seasons=3,
                        drift_scale_prior=partial_scale_prior,
                        initial_effect_prior=loc_prior)
    model = Sum([linear_trend, seasonal],
                observation_noise_scale_prior=partial_scale_prior)
    param_samples = [p.prior.sample(seed=seed) for p in model.parameters]
    ssm = model.make_state_space_model(num_timesteps=2,
                                       param_vals=param_samples)

    # Test that the model's batch shape matches the SSM's batch shape,
    # and that they both match the expected broadcast shape.
    self.assertAllEqual(model.batch_shape, ssm.batch_shape)

    (model_batch_shape_tensor_,
     ssm_batch_shape_tensor_) = self.evaluate((model.batch_shape_tensor(),
                                               ssm.batch_shape_tensor()))
    self.assertAllEqual(model_batch_shape_tensor_, ssm_batch_shape_tensor_)
    self.assertAllEqual(model_batch_shape_tensor_,
                        expected_broadcast_batch_shape)
Code example #12
    def testLogNormalPDF(self):
        loc, scale = 1.5, 0.4
        dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)

        x = np.array([1e-4, 1.0, 2.0], dtype=np.float32)

        log_pdf = dist.log_prob(x)
        analytical_log_pdf = -np.log(x * scale * np.sqrt(2 * np.pi)) - (
            np.log(x) - loc)**2 / (2. * scale**2)

        self.assertAllClose(self.evaluate(log_pdf), analytical_log_pdf)
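The reference value is the LogNormal log-density: the Normal log-density of \(\log x\) plus the change-of-variables term \(-\log x\), i.e.

\[
  \log p(x) = -\log\!\big(x\,\sigma\sqrt{2\pi}\big)
              - \frac{(\log x - \mu)^2}{2\sigma^2},
  \qquad x > 0,
\]

which is exactly the `analytical_log_pdf` expression above.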
Code example #13
    def test_simple_regression_correctness(self):
        # Verify that optimizing a simple linear regression by gradient descent
        # recovers the known-correct weights.
        batch_shape = [4, 3]
        num_timesteps = 10
        num_features = 2
        design_matrix = self._build_placeholder(
            np.random.randn(*(batch_shape + [num_timesteps, num_features])))

        true_weights = self._build_placeholder([4., -3.])
        predicted_time_series = tf.linalg.matmul(design_matrix,
                                                 true_weights[..., tf.newaxis])

        linear_regression = LinearRegression(
            design_matrix=design_matrix,
            weights_prior=tfd.Independent(tfd.Cauchy(
                loc=self._build_placeholder(np.zeros([num_features])),
                scale=self._build_placeholder(np.ones([num_features]))),
                                          reinterpreted_batch_ndims=1))
        observation_noise_scale_prior = tfd.LogNormal(
            loc=self._build_placeholder(-2),
            scale=self._build_placeholder(0.1))
        model = Sum(
            components=[linear_regression],
            observation_noise_scale_prior=observation_noise_scale_prior)

        learnable_weights = tf.Variable(
            tf.zeros([num_features], dtype=true_weights.dtype))

        def build_loss():
            learnable_ssm = model.make_state_space_model(
                num_timesteps=num_timesteps,
                param_vals={
                    "LinearRegression/_weights": learnable_weights,
                    "observation_noise_scale":
                    observation_noise_scale_prior.mode()
                })
            return -learnable_ssm.log_prob(predicted_time_series)

        # We provide graph- and eager-mode optimization for TF 2.0 compatibility.
        num_train_steps = 80
        optimizer = tf1.train.AdamOptimizer(learning_rate=0.1)
        if tf.executing_eagerly():
            for _ in range(num_train_steps):
                optimizer.minimize(build_loss)
        else:
            train_op = optimizer.minimize(build_loss())
            self.evaluate(tf1.global_variables_initializer())
            for _ in range(num_train_steps):
                _ = self.evaluate(train_op)
        self.assertAllClose(*self.evaluate((true_weights, learnable_weights)),
                            atol=0.2)
Code example #14
def test_get_mean_field_approximation_tree(
    flat_tree_test_data: TreeTestData, with_init_loc: bool
):
    test_tree = data_to_tensor_tree(flat_tree_test_data)
    taxon_count = test_tree.taxon_count
    tree_name = "tree_dist_name"

    init_loc: tp.Optional[tp.Dict[str, object]]
    if with_init_loc:
        init_loc = dict(tree=test_tree)
    else:
        init_loc = None

    model = tfd.JointDistributionNamed(
        dict(
            pop_size=tfd.LogNormal(_constant(0.0), _constant(1.0)),
            tree=lambda pop_size: ConstantCoalescent(
                taxon_count, pop_size, test_tree.sampling_times, tree_name=tree_name
            ),
            obs=lambda tree: tfd.Normal(
                _constant(0.0), tf.reduce_sum(tree.branch_lengths)
            ),
        )
    )
    obs = _constant([10.0])
    pinned = model.experimental_pin(obs=obs)
    approximation = get_fixed_topology_mean_field_approximation(
        pinned,
        dtype=DEFAULT_FLOAT_DTYPE_TF,
        topology_pins={tree_name: test_tree.topology},
        init_loc=init_loc,
    )

    sample = approximation.sample()
    assert (
        tf.reduce_all(
            sample["tree"].topology.parent_indices == test_tree.topology.parent_indices
        )
        .numpy()
        .item()
    )
    assert_allclose(
        sample["tree"].sampling_times.numpy(), test_tree.sampling_times.numpy()
    )
    model_log_prob = pinned.unnormalized_log_prob(sample)
    approx_log_prob = approximation.log_prob(sample)
    assert np.isfinite(model_log_prob.numpy())
    assert np.isfinite(approx_log_prob.numpy())
Code example #15
  def testLogNormalStats(self):

    loc = np.float32([3., 1.5])
    scale = np.float32([0.4, 1.1])
    dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)

    self.assertAllClose(self.evaluate(dist.mean()),
                        np.exp(loc + scale**2 / 2))
    self.assertAllClose(self.evaluate(dist.variance()),
                        (np.exp(scale**2) - 1) * np.exp(2 * loc + scale**2))
    self.assertAllClose(self.evaluate(dist.stddev()),
                        np.sqrt(self.evaluate(dist.variance())))
    self.assertAllClose(self.evaluate(dist.mode()),
                        np.exp(loc - scale**2))
    self.assertAllClose(self.evaluate(dist.entropy()),
                        np.log(scale * np.exp(loc + 0.5) * np.sqrt(2 * np.pi)))
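For reference, the closed forms asserted above, for \(X \sim \mathrm{LogNormal}(\mu, \sigma)\):

\[
  \mathbb{E}[X] = e^{\mu + \sigma^2/2}, \qquad
  \operatorname{Var}[X] = \big(e^{\sigma^2} - 1\big)\, e^{2\mu + \sigma^2}, \qquad
  \operatorname{mode}[X] = e^{\mu - \sigma^2},
\]
\[
  H[X] = \mu + \tfrac{1}{2} + \log\big(\sigma\sqrt{2\pi}\big)
       = \log\big(\sigma\, e^{\mu + 1/2} \sqrt{2\pi}\big).
\]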
Code example #16
    def test_nested_partial_value(self, sample_fn):
        innermost = tfd.JointDistributionNamed({
            'a':
            tfd.Exponential(1.),
            'b':
            lambda a: tfd.Sample(tfd.LogNormal(a, a), [5]),
        })

        inner = tfd.JointDistributionNamed({
            'c': tfd.Exponential(1.),
            'd': innermost,
        })

        outer = tfd.JointDistributionNamed({
            'e': tfd.Exponential(1.),
            'f': inner,
        })

        seed = test_util.test_seed(sampler_type='stateless')
        true_xs = outer.sample(seed=seed)

        def _update(dict_, **kwargs):
            # Return a copy with `kwargs` applied, leaving `dict_` unmodified.
            dict_ = dict_.copy()
            dict_.update(**kwargs)
            return dict_

        # These asserts work because we advance the stateless seed inside the model
        # whether or not a sample is actually generated.
        partial_xs = _update(true_xs, f=None)
        xs = sample_fn(outer, value=partial_xs, seed=seed)
        self.assertAllCloseNested(true_xs, xs)

        partial_xs = _update(true_xs, e=None)
        xs = sample_fn(outer, value=partial_xs, seed=seed)
        self.assertAllCloseNested(true_xs, xs)

        partial_xs = _update(true_xs, f=_update(true_xs['f'], d=None))
        xs = sample_fn(outer, value=partial_xs, seed=seed)
        self.assertAllCloseNested(true_xs, xs)

        partial_xs = _update(true_xs,
                             f=_update(true_xs['f'],
                                       d=_update(true_xs['f']['d'], a=None)))
        xs = sample_fn(outer, value=partial_xs, seed=seed)
        self.assertAllCloseNested(true_xs, xs)
Code example #17
def test_get_mean_field_approximation():
    sample_size = 3
    model = tfd.JointDistributionNamed(
        dict(
            a=tfd.Normal(_constant(0.0), _constant(1.0)),
            b=lambda a: tfd.Sample(tfd.LogNormal(a, _constant(1.0)), sample_size),
            obs=lambda b: tfd.Independent(
                tfd.Normal(b, _constant(1.0)), reinterpreted_batch_ndims=1
            ),
        )
    )
    obs = _constant([-1.1, 2.1, 0.1])
    pinned = model.experimental_pin(obs=obs)
    approximation = get_mean_field_approximation(
        pinned, init_loc=dict(a=_constant(0.1)), dtype=DEFAULT_FLOAT_DTYPE_TF
    )
    sample = approximation.sample()
    model_log_prob = pinned.unnormalized_log_prob(sample)
    approx_log_prob = approximation.log_prob(sample)
    assert np.isfinite(model_log_prob.numpy())
    assert np.isfinite(approx_log_prob.numpy())
Code example #18
  def test_kahan_precision(self, jit=False):
    maybe_jit = lambda f: f
    if jit:
      self.skip_if_no_xla()
      maybe_jit = tf.function(jit_compile=True)

    n = 2_500
    stream = test_util.test_seed_stream()
    samps = tfd.Normal(0, 1).sample(n, seed=stream())

    scale = tfd.LogNormal(0, .2).sample([7, 1], seed=stream())
    mvn = tfd.MultivariateNormalTriL(
        loc=tf.zeros([n]), scale_tril=tf.linalg.diag(tf.zeros([n]) + scale),
        experimental_use_kahan_sum=True)
    mvn64 = tfd.MultivariateNormalTriL(
        loc=tf.zeros([n], dtype=tf.float64),
        scale_tril=tf.linalg.diag(
            tf.zeros([n], dtype=tf.float64) + tf.cast(scale, tf.float64)))
    lp = maybe_jit(mvn.log_prob)(samps)
    lp64 = mvn64.log_prob(tf.cast(samps, tf.float64))
    lp, lp64 = self.evaluate((tf.cast(lp, tf.float64), lp64))
    # With fast-math off, without Kahan fails 15-100%, max-abs-error ~.006
    self.assertAllClose(lp64, lp, rtol=0, atol=.0008)
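Per its name and the comment above, `experimental_use_kahan_sum=True` makes the MVN accumulate its log-prob reduction with compensated (Kahan) summation, which keeps the float32 rounding error of this 2,500-term sum close to the float64 reference. A minimal sketch of the technique itself (illustrative only, not TFP's internal implementation):

 def kahan_sum(xs):
     """Compensated summation: carry lost low-order bits in `comp`."""
     total, comp = 0.0, 0.0
     for x in xs:
         y = x - comp             # re-inject previously lost digits
         t = total + y            # big + small: low bits of y may be dropped
         comp = (t - total) - y   # recover exactly what was dropped
         total = t
     return total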
Code example #19
File: seasonal.py  Project: bdzhuxiaoning/probability
    def __init__(self,
                 num_seasons,
                 num_steps_per_season=1,
                 drift_scale_prior=None,
                 initial_effect_prior=None,
                 constrain_mean_effect_to_zero=True,
                 observed_time_series=None,
                 name=None):
        """Specify a seasonal effects model.

    Args:
      num_seasons: Scalar Python `int` number of seasons.
      num_steps_per_season: Python `int` number of steps in each
        season. This may be either a scalar (shape `[]`), in which case all
        seasons have the same length; a NumPy array of shape `[num_seasons]`,
        in which case seasons have different lengths that are constant across
        cycles; or a NumPy array of shape `[num_cycles, num_seasons]`, in
        which case each season's length may also vary from cycle to cycle
        (e.g., a four-year cycle containing a leap day).
        Default value: 1.
      drift_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `drift_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_effect_prior: optional `tfd.Distribution` instance specifying a
        normal prior on the initial effect of each season. This may be either
        a scalar `tfd.Normal` prior, in which case it applies independently to
        every season, or it may be multivariate normal (e.g.,
        `tfd.MultivariateNormalDiag`) with event shape `[num_seasons]`, in
        which case it specifies a joint prior across all seasons. If `None`, a
        heuristic default prior is constructed based on the provided
        `observed_time_series`.
        Default value: `None`.
      constrain_mean_effect_to_zero: if `True`, use a model parameterization
        that constrains the mean effect across all seasons to be zero. This
        constraint is generally helpful in identifying the contributions of
        different model components and can lead to more interpretable
        posterior decompositions. It may be undesirable if you plan to directly
        examine the latent space of the underlying state space model.
        Default value: `True`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'Seasonal'.
    """

        with tf.compat.v1.name_scope(name,
                                     'Seasonal',
                                     values=[observed_time_series]) as name:

            _, observed_stddev, observed_initial = (
                sts_util.empirical_statistics(observed_time_series)
                if observed_time_series is not None else (0., 1., 0.))

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if drift_scale_prior is None:
                drift_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=3.)
            if initial_effect_prior is None:
                initial_effect_prior = tfd.Normal(
                    loc=observed_initial,
                    scale=tf.abs(observed_initial) + observed_stddev)

            dtype = tf.debugging.assert_same_float_dtype(
                [drift_scale_prior, initial_effect_prior])

            if isinstance(initial_effect_prior, tfd.Normal):
                initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=tf.stack([initial_effect_prior.mean()] * num_seasons,
                                 axis=-1),
                    scale_diag=tf.stack([initial_effect_prior.stddev()] *
                                        num_seasons,
                                        axis=-1))
            else:
                initial_state_prior = initial_effect_prior

            if constrain_mean_effect_to_zero:
                # Transform the prior to the residual parameterization used by
                # `ConstrainedSeasonalStateSpaceModel`, imposing a zero-sum constraint.
                # This doesn't change the marginal prior on individual effects, but
                # does introduce dependence between the effects.
                (effects_to_residuals,
                 _) = build_effects_to_residuals_matrix(num_seasons,
                                                        dtype=dtype)
                effects_to_residuals_linop = tf.linalg.LinearOperatorFullMatrix(
                    effects_to_residuals
                )  # Use linop so that matmul broadcasts.
                initial_state_prior_loc = effects_to_residuals_linop.matvec(
                    initial_state_prior.mean())
                initial_state_prior_scale_linop = effects_to_residuals_linop.matmul(
                    initial_state_prior.scale)  # returns LinearOperator
                initial_state_prior = tfd.MultivariateNormalFullCovariance(
                    loc=initial_state_prior_loc,
                    covariance_matrix=initial_state_prior_scale_linop.matmul(
                        initial_state_prior_scale_linop.to_dense(),
                        adjoint_arg=True))

            self._constrain_mean_effect_to_zero = constrain_mean_effect_to_zero
            self._initial_state_prior = initial_state_prior
            self._num_seasons = num_seasons
            self._num_steps_per_season = num_steps_per_season

            super(Seasonal, self).__init__(
                parameters=[
                    Parameter('drift_scale', drift_scale_prior,
                              tfb.Softplus()),
                ],
                latent_size=(num_seasons -
                             1 if self.constrain_mean_effect_to_zero else
                             num_seasons),
                name=name)
Code example #20
    def __init__(self,
                 num_seasons,
                 num_steps_per_season=1,
                 drift_scale_prior=None,
                 initial_effect_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a seasonal effects model.

    Args:
      num_seasons: Scalar Python `int` number of seasons.
      num_steps_per_season: Python `int` number of steps in each
        season. This may be either a scalar (shape `[]`), in which case all
        seasons have the same length, or a NumPy array of shape `[num_seasons]`.
        Default value: 1.
      drift_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `drift_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_effect_prior: optional `tfd.Distribution` instance specifying a
        normal prior on the initial effect of each season. This may be either
        a scalar `tfd.Normal` prior, in which case it applies independently to
        every season, or it may be multivariate normal (e.g.,
        `tfd.MultivariateNormalDiag`) with event shape `[num_seasons]`, in
        which case it specifies a joint prior across all seasons. If `None`, a
        heuristic default prior is constructed based on the provided
        `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series).
        Default value: `None`.
      name: the name of this model component.
        Default value: 'Seasonal'.
    """

        with tf.compat.v1.name_scope(name,
                                     'Seasonal',
                                     values=[observed_time_series]) as name:

            observed_stddev, observed_initial = (
                sts_util.empirical_statistics(observed_time_series)
                if observed_time_series is not None else (1., 0.))

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if drift_scale_prior is None:
                drift_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=3.)
            if initial_effect_prior is None:
                initial_effect_prior = tfd.Normal(
                    loc=observed_initial,
                    scale=tf.abs(observed_initial) + observed_stddev)

            self._num_seasons = num_seasons
            self._num_steps_per_season = num_steps_per_season

            tf.debugging.assert_same_float_dtype(
                [drift_scale_prior, initial_effect_prior])

            if isinstance(initial_effect_prior, tfd.Normal):
                self._initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=tf.stack([initial_effect_prior.mean()] * num_seasons,
                                 axis=-1),
                    scale_diag=tf.stack([initial_effect_prior.stddev()] *
                                        num_seasons,
                                        axis=-1))
            else:
                self._initial_state_prior = initial_effect_prior

            super(Seasonal, self).__init__(parameters=[
                Parameter('drift_scale', drift_scale_prior, tfb.Softplus()),
            ],
                                           latent_size=num_seasons,
                                           name=name)
Code example #21
    def __init__(self,
                 components,
                 constant_offset=None,
                 observation_noise_scale_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a structural time series model representing a sum of components.

    Args:
      components: Python `list` of one or more StructuralTimeSeries instances.
        These must have unique names.
      constant_offset: optional `float` `Tensor` of shape broadcasting to
        `concat([batch_shape, [num_timesteps]])` specifying a constant value
        added to the sum of outputs from the component models.
        This allows the components to model the shifted series
        `observed_time_series - constant_offset`. If `None`, this is set to the
        mean of the provided `observed_time_series`.
        Default value: `None`.
      observation_noise_scale_prior: optional `tfd.Distribution` instance
        specifying a prior on `observation_noise_scale`. If `None`, a heuristic
        default prior is constructed based on the provided
        `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series. This is
        used to set the constant offset, if not provided, and to construct a
        default heuristic `observation_noise_scale_prior` if not provided. May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: Python `str` name of this model component; used as `name_scope`
        for ops created by this class.
        Default value: 'Sum'.

    Raises:
      ValueError: if components do not have unique names.
    """

        with tf.name_scope(name or 'Sum') as name:
            if observed_time_series is not None:
                observed_mean, observed_stddev, _ = (
                    sts_util.empirical_statistics(observed_time_series))
            else:
                observed_mean, observed_stddev = 0., 1.

            if observation_noise_scale_prior is None:
                observation_noise_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                              scale=2.)

            dtype = dtype_util.common_dtype([
                constant_offset, observation_noise_scale_prior, observed_mean,
                observed_stddev
            ])

            # Ensure that offsets have canonical shape `[..., num_timesteps]`.
            if constant_offset is None:
                constant_offset = tf.convert_to_tensor(observed_mean,
                                                       dtype=dtype)[...,
                                                                    tf.newaxis]
            constant_offset *= tf.ones([1], dtype=dtype)

            # Check that components have unique names, to ensure that inherited
            # parameters will be assigned unique names.
            component_names = [c.name for c in components]
            if len(component_names) != len(set(component_names)):
                raise ValueError(
                    'Components must have unique names: {}'.format(
                        component_names))
            components_by_name = collections.OrderedDict([(c.name, c)
                                                          for c in components])

            # Build parameters list for the combined model, by inheriting parameters
            # from the component models in canonical order.
            parameters = [
                Parameter(
                    'observation_noise_scale', observation_noise_scale_prior,
                    tfb.Chain(
                        [tfb.Scale(scale=observed_stddev),
                         tfb.Softplus()]))
            ]
            for component in components:
                for parameter in component.parameters:
                    parameters.append(
                        Parameter(name='{}_{}'.format(component.name,
                                                      parameter.name),
                                  prior=parameter.prior,
                                  bijector=parameter.bijector))

            self._components = components
            self._components_by_name = components_by_name
            self._constant_offset = constant_offset

            super(Sum, self).__init__(parameters=parameters,
                                      latent_size=sum([
                                          component.latent_size
                                          for component in components
                                      ]),
                                      name=name)
Code example #22
  def __init__(self,
               order,
               coefficients_prior=None,
               level_scale_prior=None,
               initial_state_prior=None,
               coefficient_constraining_bijector=None,
               observed_time_series=None,
               name=None):
    """Specify an autoregressive model.

    Args:
      order: scalar Python positive `int` specifying the number of past
        timesteps to regress on.
      coefficients_prior: optional `tfd.Distribution` instance specifying a
        prior on the `coefficients` parameter. If `None`, a default standard
        normal (`tfd.MultivariateNormalDiag(scale_diag=tf.ones([order]))`) prior
        is used.
        Default value: `None`.
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_state_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial state, corresponding to the values of the process
        at a set of size `order` of imagined timesteps before the initial step.
        If `None`, a heuristic default prior is constructed based on the
        provided `observed_time_series`.
        Default value: `None`.
      coefficient_constraining_bijector: optional `tfb.Bijector` instance
        representing a constraining mapping for the autoregressive coefficients.
        For example, `tfb.Tanh()` constrains the coefficients to lie in
        `(-1, 1)`, while `tfb.Softplus()` constrains them to be positive, and
        `tfb.Identity()` implies no constraint. If `None`, the default behavior
        constrains the coefficients to lie in `(-1, 1)` using a `Tanh` bijector.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'Autoregressive'.
    """
    with tf.name_scope(name or 'Autoregressive') as name:
      masked_time_series = None
      if observed_time_series is not None:
        masked_time_series = (
            sts_util.canonicalize_observed_time_series_with_mask(
                observed_time_series))

      dtype = dtype_util.common_dtype(
          [(masked_time_series.time_series
            if masked_time_series is not None else None),
           coefficients_prior,
           level_scale_prior,
           initial_state_prior], dtype_hint=tf.float32)

      if observed_time_series is not None:
        _, observed_stddev, observed_initial = sts_util.empirical_statistics(
            masked_time_series)
      else:
        observed_stddev, observed_initial = (
            tf.convert_to_tensor(value=1., dtype=dtype),
            tf.convert_to_tensor(value=0., dtype=dtype))
      batch_ones = tf.ones(tf.concat([
          tf.shape(observed_initial),  # Batch shape
          [order]], axis=0), dtype=dtype)

      # Heuristic default priors. Overriding these may dramatically
      # change inference performance and results.
      if coefficients_prior is None:
        coefficients_prior = tfd.MultivariateNormalDiag(
            scale_diag=batch_ones)
      if level_scale_prior is None:
        level_scale_prior = tfd.LogNormal(
            loc=tf.math.log(0.05 * observed_stddev), scale=3.)

      if (coefficients_prior.event_shape.is_fully_defined() and
          order != coefficients_prior.event_shape[0]):
        raise ValueError("Prior dimension {} doesn't match order {}.".format(
            coefficients_prior.event_shape[0], order))

      if initial_state_prior is None:
        initial_state_prior = tfd.MultivariateNormalDiag(
            loc=observed_initial[..., tf.newaxis] * batch_ones,
            scale_diag=(tf.abs(observed_initial) +
                        observed_stddev)[..., tf.newaxis] * batch_ones)

      self._order = order
      self._coefficients_prior = coefficients_prior
      self._level_scale_prior = level_scale_prior
      self._initial_state_prior = initial_state_prior

      if coefficient_constraining_bijector is None:
        coefficient_constraining_bijector = tfb.Tanh()
      super(Autoregressive, self).__init__(
          parameters=[
              Parameter('coefficients',
                        coefficients_prior,
                        coefficient_constraining_bijector),
              Parameter('level_scale', level_scale_prior,
                        tfb.Chain([tfb.AffineScalar(scale=observed_stddev),
                                   tfb.Softplus()]))
          ],
          latent_size=order,
          name=name)
Code example #23
    def __init__(self,
                 level_scale_prior=None,
                 slope_mean_prior=None,
                 slope_scale_prior=None,
                 autoregressive_coef_prior=None,
                 initial_level_prior=None,
                 initial_slope_prior=None,
                 observed_time_series=None,
                 constrain_ar_coef_stationary=True,
                 constrain_ar_coef_positive=False,
                 name=None):
        """Specify a semi-local linear trend model.

    Args:
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      slope_mean_prior: optional `tfd.Distribution` instance specifying a prior
        on the `slope_mean` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      slope_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `slope_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      autoregressive_coef_prior: optional `tfd.Distribution` instance specifying
        a prior on the `autoregressive_coef` parameter. If `None`, the default
        prior is a standard `Normal(0., 1.)`. Note that the prior may be
        implicitly truncated by `constrain_ar_coef_stationary` and/or
        `constrain_ar_coef_positive`.
        Default value: `None`.
      initial_level_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial level. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_slope_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial slope. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      constrain_ar_coef_stationary: if `True`, perform inference using a
        parameterization that restricts `autoregressive_coef` to the interval
        `(-1, 1)`, or `(0, 1)` if `constrain_ar_coef_positive` is also `True`,
        corresponding to stationary processes. This will implicitly truncate
        the support of `autoregressive_coef_prior`.
        Default value: `True`.
      constrain_ar_coef_positive: if `True`, perform inference using a
        parameterization that restricts `autoregressive_coef` to be positive,
        or in `(0, 1)` if `constrain_ar_coef_stationary` is also `True`. This
        will implicitly truncate the support of `autoregressive_coef_prior`.
        Default value: `False`.
      name: the name of this model component.
        Default value: 'SemiLocalLinearTrend'.
    """

        with tf.name_scope(name or 'SemiLocalLinearTrend') as name:
            if observed_time_series is not None:
                _, observed_stddev, observed_initial = sts_util.empirical_statistics(
                    observed_time_series)
            else:
                observed_stddev, observed_initial = 1., 0.

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if level_scale_prior is None:
                level_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=2.)
            if slope_mean_prior is None:
                slope_mean_prior = tfd.Normal(loc=0., scale=observed_stddev)
            if slope_scale_prior is None:
                slope_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=2.)
            if autoregressive_coef_prior is None:
                autoregressive_coef_prior = tfd.Normal(
                    loc=0., scale=tf.ones_like(observed_initial))
            if initial_level_prior is None:
                initial_level_prior = tfd.Normal(
                    loc=observed_initial,
                    scale=tf.abs(observed_initial) + observed_stddev)
            if initial_slope_prior is None:
                initial_slope_prior = tfd.Normal(loc=0., scale=observed_stddev)

            self._initial_state_prior = tfd.MultivariateNormalDiag(
                loc=tf.stack(
                    [initial_level_prior.mean(),
                     initial_slope_prior.mean()],
                    axis=-1),
                scale_diag=tf.stack([
                    initial_level_prior.stddev(),
                    initial_slope_prior.stddev()
                ],
                                    axis=-1))

            # Constrain the support of the autoregressive coefficient.
            if constrain_ar_coef_stationary and constrain_ar_coef_positive:
                autoregressive_coef_bijector = tfb.Sigmoid(
                )  # support in (0, 1)
            elif constrain_ar_coef_positive:
                autoregressive_coef_bijector = tfb.Softplus(
                )  # support in (0, infty)
            elif constrain_ar_coef_stationary:
                autoregressive_coef_bijector = tfb.Tanh()  # support in (-1, 1)
            else:
                autoregressive_coef_bijector = tfb.Identity()  # unconstrained

            stddev_preconditioner = tfb.Scale(scale=observed_stddev)
            scaled_softplus = tfb.Chain(
                [stddev_preconditioner, tfb.Softplus()])
            super(SemiLocalLinearTrend, self).__init__(parameters=[
                Parameter('level_scale', level_scale_prior, scaled_softplus),
                Parameter('slope_mean', slope_mean_prior,
                          stddev_preconditioner),
                Parameter('slope_scale', slope_scale_prior, scaled_softplus),
                Parameter('autoregressive_coef', autoregressive_coef_prior,
                          autoregressive_coef_bijector),
            ],
                                                       latent_size=2,
                                                       name=name)
Code example #24
File: util_test.py  Project: qoffee/probability
 def testGradientWorksDespiteBijectorCaching(self):
     x = tf.constant(2.)
     fn_result, grads = util.maybe_call_fn_and_grads(
         lambda x_: tfd.LogNormal(loc=0., scale=1.).log_prob(x_), x)
     self.assertAllEqual(False, fn_result is None)
     self.assertAllEqual([False], [g is None for g in grads])
Code example #25
  def __init__(self,
               components,
               observation_noise_scale_prior=None,
               observed_time_series=None,
               name=None):
    """Specify a structural time series model representing a sum of components.

    Args:
      components: Python `list` of one or more StructuralTimeSeries instances.
        These must have unique names.
      observation_noise_scale_prior: optional `tfd.Distribution` instance
        specifying a prior on `observation_noise_scale`. If `None`, a heuristic
        default prior is constructed based on the provided
        `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series. This is
        used only if `observation_noise_scale_prior` is not provided, to
        construct a default heuristic prior.
        Default value: `None`.
      name: Python `str` name of this model component; used as `name_scope`
        for ops created by this class.
        Default value: 'Sum'.

    Raises:
      ValueError: if components do not have unique names.
    """

    with tf.compat.v1.name_scope(
        name, 'Sum', values=[observed_time_series]) as name:
      if observation_noise_scale_prior is None:
        observed_stddev, _ = (
            sts_util.empirical_statistics(observed_time_series)
            if observed_time_series is not None else (1., 0.))
        observation_noise_scale_prior = tfd.LogNormal(
            loc=tf.math.log(.01 * observed_stddev), scale=2.)

      # Check that components have unique names, to ensure that inherited
      # parameters will be assigned unique names.
      component_names = [c.name for c in components]
      if len(component_names) != len(set(component_names)):
        raise ValueError(
            'Components must have unique names: {}'.format(component_names))
      components_by_name = collections.OrderedDict(
          [(c.name, c) for c in components])

      # Build parameters list for the combined model, by inheriting parameters
      # from the component models in canonical order.
      parameters = [
          Parameter('observation_noise_scale', observation_noise_scale_prior,
                    tfb.Softplus()),
      ] + [Parameter(name='{}_{}'.format(component.name, parameter.name),
                     prior=parameter.prior,
                     bijector=parameter.bijector)
           for component in components for parameter in component.parameters]

      self._components = components
      self._components_by_name = components_by_name

      super(Sum, self).__init__(
          parameters=parameters,
          latent_size=sum(
              [component.latent_size for component in components]),
          name=name)
Code example #26
    def __init__(self,
                 level_scale_prior=None,
                 initial_level_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a local level model.

    Args:
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_level_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial level. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'LocalLevel'.
    """

        with tf.name_scope(name or 'LocalLevel') as name:

            dtype = dtype_util.common_dtype(
                [level_scale_prior, initial_level_prior])

            if observed_time_series is not None:
                _, observed_stddev, observed_initial = (
                    sts_util.empirical_statistics(observed_time_series))
            else:
                observed_stddev, observed_initial = (tf.convert_to_tensor(
                    value=1.,
                    dtype=dtype), tf.convert_to_tensor(value=0., dtype=dtype))

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if level_scale_prior is None:
                level_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .05 * observed_stddev),
                                                  scale=3.,
                                                  name='level_scale_prior')
            if initial_level_prior is None:
                self._initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=observed_initial[..., tf.newaxis],
                    scale_diag=(tf.abs(observed_initial) +
                                observed_stddev)[..., tf.newaxis],
                    name='initial_level_prior')
            else:
                self._initial_state_prior = tfd.MultivariateNormalDiag(
                    loc=initial_level_prior.mean()[..., tf.newaxis],
                    scale_diag=initial_level_prior.stddev()[..., tf.newaxis])

            super(LocalLevel, self).__init__(parameters=[
                Parameter(
                    'level_scale', level_scale_prior,
                    tfb.Chain([
                        tfb.AffineScalar(scale=observed_stddev),
                        tfb.Softplus()
                    ])),
            ],
                                             latent_size=1,
                                             name=name)
Code example #27
 def testSupportBijectorOutsideRange(self):
     dist = tfd.LogNormal(loc=1., scale=2., validate_args=True)
     with self.assertRaisesOpError('must be greater than 0'):
         dist._experimental_default_event_space_bijector().inverse(
             [-4.2, -1e-6, -1.3])
Code example #28
    def __init__(self,
                 design_matrix,
                 drift_scale_prior=None,
                 initial_weights_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a dynamic linear regression.

    Args:
      design_matrix: float `Tensor` of shape `concat([batch_shape,
        [num_timesteps, num_features]])`.
      drift_scale_prior: instance of `tfd.Distribution` specifying a prior on
        the `drift_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_weights_prior: instance of `tfd.MultivariateNormal` representing
        the prior distribution on the latent states (the regression weights).
        Must have event shape `[num_features]`. If `None`, a weakly-informative
        Normal(0., 10.) prior is used.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series. Any `NaN`s
        are interpreted as missing observations; missingness may be also be
        explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series).
        Default value: `None`.
      name: Python `str` for the name of this component.
        Default value: 'DynamicLinearRegression'.

    """
        init_parameters = dict(locals())
        with tf.name_scope(name or 'DynamicLinearRegression') as name:
            dtype = dtype_util.common_dtype(
                [design_matrix, drift_scale_prior, initial_weights_prior])

            num_features = prefer_static.shape(design_matrix)[-1]

            # Default to a weakly-informative Normal(0., 10.) for the initial state.
            if initial_weights_prior is None:
                initial_weights_prior = tfd.MultivariateNormalDiag(
                    scale_diag=10. * tf.ones([num_features], dtype=dtype))

            # `observed_stddev` is also used below to scale the `drift_scale`
            # bijector, so compute it even when a custom prior is provided.
            if observed_time_series is None:
                observed_stddev = tf.constant(1.0, dtype=dtype)
            else:
                _, observed_stddev, _ = sts_util.empirical_statistics(
                    observed_time_series)

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if drift_scale_prior is None:
                drift_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .05 * observed_stddev),
                                                  scale=3.,
                                                  name='drift_scale_prior')

            self._initial_state_prior = initial_weights_prior
            self._design_matrix = design_matrix

            super(DynamicLinearRegression,
                  self).__init__(parameters=[
                      Parameter(
                          'drift_scale', drift_scale_prior,
                          tfb.Chain([
                              tfb.Scale(scale=observed_stddev),
                              tfb.Softplus()
                          ]))
                  ],
                                 latent_size=num_features,
                                 init_parameters=init_parameters,
                                 name=name)
Code example #29
  def __init__(self,
               level_scale_prior=None,
               slope_scale_prior=None,
               initial_level_prior=None,
               initial_slope_prior=None,
               observed_time_series=None,
               name=None):
    """Specify a local linear trend model.

    Args:
      level_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `level_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      slope_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `slope_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_level_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial level. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_slope_prior: optional `tfd.Distribution` instance specifying a
        prior on the initial slope. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series).
        Default value: `None`.
      name: the name of this model component.
        Default value: 'LocalLinearTrend'.
    """

    with tf.name_scope(
        name, 'LocalLinearTrend', values=[observed_time_series]) as name:

      observed_stddev, observed_initial = (
          sts_util.empirical_statistics(observed_time_series)
          if observed_time_series is not None else (1., 0.))

      # Heuristic default priors. Overriding these may dramatically
      # change inference performance and results.
      if level_scale_prior is None:
        level_scale_prior = tfd.LogNormal(
            loc=tf.log(.05 * observed_stddev),
            scale=3.,
            name='level_scale_prior')
      if slope_scale_prior is None:
        slope_scale_prior = tfd.LogNormal(
            loc=tf.log(.05 * observed_stddev),
            scale=3.,
            name='slope_scale_prior')
      if initial_level_prior is None:
        initial_level_prior = tfd.Normal(
            loc=observed_initial,
            scale=tf.abs(observed_initial) + observed_stddev,
            name='initial_level_prior')
      if initial_slope_prior is None:
        initial_slope_prior = tfd.Normal(
            loc=0., scale=observed_stddev, name='initial_slope_prior')

      tf.assert_same_float_dtype([
          level_scale_prior, slope_scale_prior, initial_level_prior,
          initial_slope_prior
      ])

      self._initial_state_prior = tfd.MultivariateNormalDiag(
          loc=tf.stack(
              [initial_level_prior.mean(),
               initial_slope_prior.mean()
              ], axis=-1),
          scale_diag=tf.stack([
              initial_level_prior.stddev(),
              initial_slope_prior.stddev()
          ], axis=-1))

      super(LocalLinearTrend, self).__init__(
          parameters=[
              Parameter('level_scale', level_scale_prior, tfb.Softplus()),
              Parameter('slope_scale', slope_scale_prior, tfb.Softplus())
          ],
          latent_size=2,
          name=name)
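Note: unlike the surrounding snippets, this version targets the TF 1.x API: `tf.log` is now `tf.math.log`, the three-argument `tf.name_scope(name, default_name, values)` form survives only as `tf.compat.v1.name_scope`, and `tf.assert_same_float_dtype` moved to `tf.debugging.assert_same_float_dtype` (compare code examples #19 and #26).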
Code example #30
    def __init__(self,
                 period,
                 frequency_multipliers,
                 allow_drift=True,
                 drift_scale_prior=None,
                 initial_state_prior=None,
                 observed_time_series=None,
                 name=None):
        """Specify a smooth seasonal effects model.

    Args:
      period: positive scalar `float` `Tensor` giving the number of timesteps
        required for the longest cyclic effect to repeat.
      frequency_multipliers: One-dimensional `float` `Tensor` listing the
        frequencies (cyclic components) included in the model, as multipliers of
        the base/fundamental frequency `2. * pi / period`. Each component is
        specified by the number of times it repeats per period, and adds two
        latent dimensions to the model. A smooth seasonal model that can
        represent any periodic function is given by `frequency_multipliers = [1,
        2, ..., floor(period / 2)]`. However, it is often desirable to enforce a
        smoothness assumption (and reduce the computational burden) by dropping
        some of the higher frequencies.
      allow_drift: optional Python `bool` specifying whether the seasonal
        effects can drift over time.  Setting this to `False`
        removes the `drift_scale` parameter from the model. This is
        mathematically equivalent to
        `drift_scale_prior = tfd.Deterministic(0.)`, but removing drift
        directly is preferred because it avoids the use of a degenerate prior.
        Default value: `True`.
      drift_scale_prior: optional `tfd.Distribution` instance specifying a prior
        on the `drift_scale` parameter. If `None`, a heuristic default prior is
        constructed based on the provided `observed_time_series`.
        Default value: `None`.
      initial_state_prior: instance of `tfd.MultivariateNormal` representing
        the prior distribution on the latent states. Must have event shape
        `[2 * len(frequency_multipliers)]`. If `None`, a heuristic default prior
        is constructed based on the provided `observed_time_series`.
      observed_time_series: optional `float` `Tensor` of shape
        `batch_shape + [T, 1]` (omitting the trailing unit dimension is also
        supported when `T > 1`), specifying an observed time series.
        Any priors not explicitly set will be given default values according to
        the scale of the observed time series (or batch of time series). May
        optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
        a mask `Tensor` to specify timesteps with missing observations.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'SmoothSeasonal'.

    """

        with tf.name_scope(name or 'SmoothSeasonal') as name:

            _, observed_stddev, observed_initial = (
                sts_util.empirical_statistics(observed_time_series)
                if observed_time_series is not None else (0., 1., 0.))

            latent_size = 2 * static_num_frequencies(frequency_multipliers)

            # Heuristic default priors. Overriding these may dramatically
            # change inference performance and results.
            if drift_scale_prior is None:
                drift_scale_prior = tfd.LogNormal(loc=tf.math.log(
                    .01 * observed_stddev),
                                                  scale=3.)

            if initial_state_prior is None:
                initial_state_scale = (tf.abs(observed_initial) +
                                       observed_stddev)[..., tf.newaxis]
                ones = tf.ones([latent_size], dtype=drift_scale_prior.dtype)
                initial_state_prior = tfd.MultivariateNormalDiag(
                    scale_diag=initial_state_scale * ones)

            self._initial_state_prior = initial_state_prior
            self._period = period
            self._frequency_multipliers = frequency_multipliers

            parameters = []
            if allow_drift:
                parameters.append(
                    Parameter(
                        'drift_scale', drift_scale_prior,
                        tfb.Chain([
                            tfb.AffineScalar(scale=observed_stddev),
                            tfb.Softplus()
                        ])))
            self._allow_drift = allow_drift

            super(SmoothSeasonal, self).__init__(parameters=parameters,
                                                 latent_size=latent_size,
                                                 name=name)