Example #1
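Tests `JointDistributionNamed`'s default event space bijector: a model containing a bare distribution class (`tfd.StudentT`) raises `NotImplementedError`, while a fully specified model yields one `tfb.Bijector` per component, keyed like the model dict.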
  def test_default_event_space_bijector(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e    =          tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        s    =          tfd.HalfNormal(2.5),
        loc  =lambda s: tfd.Normal(loc=0, scale=s),
        df   =          tfd.Exponential(2),
        x    =          tfd.StudentT),
                                   validate_args=True)
    # pylint: enable=bad-whitespace
    with self.assertRaisesRegex(
        NotImplementedError, 'all elements of `model` are `tfp.distribution`s'):
      d._experimental_default_event_space_bijector()

    d = tfd.JointDistributionNamed(
        dict(e=tfd.Independent(tfd.Exponential(rate=[10, 12]), 1),
             x=tfd.Normal(loc=1, scale=1.),
             s=tfd.HalfNormal(2.5)),
        validate_args=True)
    for b in d._model_flatten(d._experimental_default_event_space_bijector()):
      self.assertIsInstance(b, tfb.Bijector)
    self.assertSetEqual(set(d.model.keys()),
                        set(d._experimental_default_event_space_bijector(
                            ).keys()))
Example #2
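Exercises dependency resolution with components declared out of order: `resolve_graph()` returns `(name, parents)` pairs in dependency order, and the distributions returned by `sample_distributions()` are parameterized by the sampled parent values.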
  def test_sample_complex_dependency(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        y=tfd.StudentT,
        x=tfd.StudentT,
        df=tfd.Exponential(2),
        loc=lambda s: tfd.Normal(loc=0, scale=s),
        s=tfd.HalfNormal(2.5),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        e=tfd.Independent(tfd.Exponential(rate=[100, 120]), 1)),
                                   validate_args=False)
    # pylint: enable=bad-whitespace

    self.assertEqual((
        ('e', ()),
        ('scale', ('e',)),
        ('s', ()),
        ('loc', ('s',)),
        ('df', ()),
        ('y', ('df', 'loc', 'scale')),
        ('x', ('df', 'loc', 'scale')),
    ), d.resolve_graph())

    x = d.sample()
    self.assertLen(x, 7)

    ds, s = d.sample_distributions()
    self.assertEqual(ds['x'].parameters['df'], s['df'])
    self.assertEqual(ds['x'].parameters['loc'], s['loc'])
    self.assertEqual(ds['x'].parameters['scale'], s['scale'])
    self.assertEqual(ds['y'].parameters['df'], s['df'])
    self.assertEqual(ds['y'].parameters['loc'], s['loc'])
    self.assertEqual(ds['y'].parameters['scale'], s['scale'])
Example #3
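Covers the nondefault shape behavior enabled by the private `_always_use_specified_sample_shape` flag: the requested sample shape is applied at every stage of ancestral sampling, so component shapes compound with the number of sampled arguments (see the inline comments).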
  def test_sample_shape_propagation_nondefault_behavior(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e=tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        s=tfd.HalfNormal(2.5),
        loc=lambda s: tfd.Normal(loc=0, scale=s),
        df=tfd.Exponential(2),
        x=tfd.StudentT),
                                   validate_args=False)
    # pylint: enable=bad-whitespace
    # The following enables the nondefault sample shape behavior.
    d._always_use_specified_sample_shape = True
    sample_shape = (2, 3)
    x = d.sample(sample_shape, seed=test_util.test_seed())
    self.assertLen(x, 6)
    self.assertEqual(sample_shape + (2,), x['e'].shape)
    self.assertEqual(sample_shape * 2, x['scale'].shape)  # Has 1 arg.
    self.assertEqual(sample_shape * 1, x['s'].shape)  # Has 0 args.
    self.assertEqual(sample_shape * 2, x['loc'].shape)  # Has 1 arg.
    self.assertEqual(sample_shape * 1, x['df'].shape)  # Has 0 args.
    # Has 3 args, one being scalar.
    self.assertEqual(sample_shape * 3, x['x'].shape)
    lp = d.log_prob(x)
    self.assertEqual(sample_shape * 3, lp.shape)
Example #4
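Round-trips a sample through the default event space bijector (`forward(inverse(y)) ≈ y`) and checks that event shapes and event shape tensors preserve the model's dict structure.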
  def test_default_event_space_bijector(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e    =          tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        s    =          tfd.HalfNormal(2.5),
        loc  =lambda s: tfd.Normal(loc=0, scale=s),
        df   =          tfd.Exponential(2),
        x    =          tfd.StudentT),
                                   validate_args=True)
    # pylint: enable=bad-whitespace

    # The event space bijector is inherited from `JointDistributionSequential`
    # and is tested more thoroughly in the tests for that class.
    b = d.experimental_default_event_space_bijector()
    y = self.evaluate(d.sample(seed=test_util.test_seed()))
    y_ = self.evaluate(b.forward(b.inverse(y)))
    self.assertAllClose(y, y_)

    # Verify that event shapes are passed through and flattened/unflattened
    # correctly.
    forward_event_shapes = b.forward_event_shape(d.event_shape)
    inverse_event_shapes = b.inverse_event_shape(d.event_shape)
    self.assertEqual(forward_event_shapes, d.event_shape)
    self.assertEqual(inverse_event_shapes, d.event_shape)

    # Verify that the outputs of other methods have the correct dict structure.
    forward_event_shape_tensors = b.forward_event_shape_tensor(
        d.event_shape_tensor())
    inverse_event_shape_tensors = b.inverse_event_shape_tensor(
        d.event_shape_tensor())
    for item in [forward_event_shape_tensors, inverse_event_shape_tensors]:
      self.assertSetEqual(set(self.evaluate(item).keys()), set(d.model.keys()))
Example #5
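Defines a minimal `tfd.Distribution` subclass whose `_sample_n` uses stateful `tf.random.normal`, then checks that `tfd.Mixture.sample` warns about falling back to stateful sampling for that component.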
  def testStatefulComponentDist(self):

    class StatefulNormal(tfd.Distribution):

      def __init__(self, loc):
        self._loc = tf.convert_to_tensor(loc)
        super(StatefulNormal, self).__init__(
            dtype=tf.float32,
            reparameterization_type=tfd.FULLY_REPARAMETERIZED,
            validate_args=False,
            allow_nan_stats=False)

      def _batch_shape(self):
        return self._loc.shape

      def _event_shape(self):
        return []

      def _sample_n(self, n, seed=None):
        return self._loc + tf.random.normal(
            tf.concat([[n], tf.shape(self._loc)], axis=0), seed=seed)

    mix = tfd.Mixture(
        cat=tfd.Categorical(logits=[0., 0]),
        components=[tfd.HalfNormal(scale=2.),
                    StatefulNormal(loc=3.)])
    with warnings.catch_warnings(record=True) as triggered:
      self.evaluate(mix.sample(seed=test_util.test_seed()))
    self.assertTrue(
        any('Falling back to stateful sampling for `components[1]`' in str(
            warning.message) for warning in triggered))
Example #6
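Shows `resolve_graph()` on a named joint model: the result is a tuple of `(name, parents)` pairs, topologically ordered so every component appears after the components it depends on.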
  def test_graph_resolution(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e=tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        s=tfd.HalfNormal(2.5),
        loc=lambda s: tfd.Normal(loc=0, scale=s),
        df=tfd.Exponential(2),
        x=tfd.StudentT),
                                   validate_args=True)
    # pylint: enable=bad-whitespace
    self.assertEqual(
        (('e', ()), ('scale', ('e',)), ('s', ()), ('loc', ('s',)),
         ('df', ()), ('x', ('df', 'loc', 'scale'))), d.resolve_graph())
Example #7
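Checks default sample shape propagation: with sample shape `[2, 3]`, each component's sample carries the batch shape `(2, 3)` plus its own event shape (e.g. `(2, 3, 2)` for the two-element `e`), as does the joint `log_prob`.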
  def test_sample_shape_propagation_default_behavior(self):
    # pylint: disable=bad-whitespace
    d = tfd.JointDistributionNamed(dict(
        e=tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
        scale=lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
        s=tfd.HalfNormal(2.5),
        loc=lambda s: tfd.Normal(loc=0, scale=s),
        df=tfd.Exponential(2),
        x=tfd.StudentT),
                                   validate_args=False)
    # pylint: enable=bad-whitespace
    x = d.sample([2, 3], seed=test_util.test_seed())
    self.assertLen(x, 6)
    self.assertEqual((2, 3, 2), x['e'].shape)
    self.assertEqual((2, 3), x['scale'].shape)
    self.assertEqual((2, 3), x['s'].shape)
    self.assertEqual((2, 3), x['loc'].shape)
    self.assertEqual((2, 3), x['df'].shape)
    self.assertEqual((2, 3), x['x'].shape)
    lp = d.log_prob(x)
    self.assertEqual((2, 3), lp.shape)
Example #8
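Constructor of the `SparseLinearRegression` structural time series component. It wraps the design matrix in a `tf.linalg.LinearOperator` if necessary, resolves the weights' batch shape, and defines a non-centered Horseshoe prior on the regression weights via the five parameters below.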
  def __init__(self,
               design_matrix,
               weights_prior_scale=0.1,
               weights_batch_shape=None,
               name=None):
    """Specify a sparse linear regression model.

    Args:
      design_matrix: float `Tensor` of shape `concat([batch_shape,
        [num_timesteps, num_features]])`. This may also optionally be
        an instance of `tf.linalg.LinearOperator`.
      weights_prior_scale: float `Tensor` defining the scale of the Horseshoe
        prior on regression weights. Small values encourage the weights to be
        sparse. The shape must broadcast with `weights_batch_shape`.
        Default value: `0.1`.
      weights_batch_shape: if `None`, defaults to
        `design_matrix.batch_shape_tensor()`. Must broadcast with the batch
        shape of `design_matrix`.
        Default value: `None`.
      name: the name of this model component.
        Default value: 'SparseLinearRegression'.
    """
    with tf.compat.v1.name_scope(
        name, 'SparseLinearRegression',
        values=[design_matrix, weights_prior_scale]) as name:

      if not isinstance(design_matrix, tfl.LinearOperator):
        design_matrix = tfl.LinearOperatorFullMatrix(
            tf.convert_to_tensor(value=design_matrix, name='design_matrix'),
            name='design_matrix_linop')

      if tf.compat.dimension_value(design_matrix.shape[-1]) is not None:
        num_features = design_matrix.shape[-1]
      else:
        num_features = design_matrix.shape_tensor()[-1]

      if weights_batch_shape is None:
        weights_batch_shape = design_matrix.batch_shape_tensor()
      else:
        weights_batch_shape = tf.convert_to_tensor(value=weights_batch_shape,
                                                   dtype=tf.int32)
      weights_shape = tf.concat([weights_batch_shape, [num_features]], axis=0)

      dtype = design_matrix.dtype

      self._design_matrix = design_matrix
      self._weights_prior_scale = weights_prior_scale

      ones_like_weights_batch = tf.ones(weights_batch_shape, dtype=dtype)
      ones_like_weights = tf.ones(weights_shape, dtype=dtype)
      super(SparseLinearRegression, self).__init__(
          parameters=[
              Parameter('global_scale_variance',
                        prior=tfd.InverseGamma(
                            0.5 * ones_like_weights_batch,
                            0.5 * ones_like_weights_batch),
                        bijector=tfb.Softplus()),
              Parameter('global_scale_noncentered',
                        prior=tfd.HalfNormal(
                            scale=ones_like_weights_batch),
                        bijector=tfb.Softplus()),
              Parameter('local_scale_variances',
                        prior=tfd.Independent(tfd.InverseGamma(
                            0.5 * ones_like_weights,
                            0.5 * ones_like_weights),
                                              reinterpreted_batch_ndims=1),
                        bijector=tfb.Softplus()),
              Parameter('local_scales_noncentered',
                        prior=tfd.Independent(tfd.HalfNormal(
                            scale=ones_like_weights),
                                              reinterpreted_batch_ndims=1),
                        bijector=tfb.Softplus()),
              Parameter('weights_noncentered',
                        prior=tfd.Independent(tfd.Normal(
                            loc=tf.zeros_like(ones_like_weights),
                            scale=ones_like_weights),
                                              reinterpreted_batch_ndims=1),
                        bijector=tfb.Identity())
          ],
          latent_size=0,
          name=name)
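A minimal usage sketch for the constructor above (hypothetical data; assumes the component is exposed as `tfp.sts.SparseLinearRegression`, as in current TensorFlow Probability):

import numpy as np
from tensorflow_probability import sts

# Hypothetical design matrix: 100 timesteps, 3 features.
design_matrix = np.random.randn(100, 3).astype(np.float32)
component = sts.SparseLinearRegression(design_matrix=design_matrix,
                                       weights_prior_scale=0.1)
# Each Parameter is a (name, prior, bijector) namedtuple.
for param in component.parameters:
  print(param.name, param.prior.event_shape)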
Example #9
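End-to-end NUTS sampling on a robust linear regression (Student-t likelihood), wrapping `NoUTurnSampler` in a `TransformedTransitionKernel` and `DualAveragingStepSizeAdaptation`; asserts that adaptation shrinks the initial step sizes, the average acceptance ratio is near the 0.8 target, and R-hat is close to 1.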
  def testSampleEndtoEnd(self):
    """An end-to-end test of sampling using NUTS."""
    strm = tfp_test_util.test_seed_stream()
    predictors = tf.cast([
        201., 244., 47., 287., 203., 58., 210., 202., 198., 158., 165., 201.,
        157., 131., 166., 160., 186., 125., 218., 146.
    ], tf.float32)
    obs = tf.cast([
        592., 401., 583., 402., 495., 173., 479., 504., 510., 416., 393., 442.,
        317., 311., 400., 337., 423., 334., 533., 344.
    ], tf.float32)
    y_sigma = tf.cast([
        61., 25., 38., 15., 21., 15., 27., 14., 30., 16., 14., 25., 52., 16.,
        34., 31., 42., 26., 16., 22.
    ], tf.float32)

    # Robust linear regression model
    robust_lm = tfd.JointDistributionSequential(
        [
            tfd.Normal(loc=0., scale=1.),  # b0
            tfd.Normal(loc=0., scale=1.),  # b1
            tfd.HalfNormal(5.),  # df
            lambda df, b1, b0: tfd.Independent(  # pylint: disable=g-long-lambda
                tfd.StudentT(  # Likelihood
                    df=df[:, None],
                    loc=b0[:, None] + b1[:, None] * predictors[None, :],
                    scale=y_sigma[None, :])),
        ],
        validate_args=True)

    log_prob = lambda b0, b1, df: robust_lm.log_prob([b0, b1, df, obs])
    init_step_size = [1., .2, .5]
    step_size0 = [tf.cast(x, dtype=tf.float32) for x in init_step_size]

    number_of_steps, burnin, nchain = 200, 50, 10

    @tf.function(autograph=False)
    def run_chain_and_get_diagnostic():
      # Random initialization of the starting position of each chain.
      b0, b1, df, _ = robust_lm.sample(nchain, seed=strm())

      # Bijectors to map constrained parameters to the real line.
      unconstraining_bijectors = [
          tfb.Identity(),
          tfb.Identity(),
          tfb.Exp(),
      ]

      def trace_fn(_, pkr):
        return (pkr.inner_results.inner_results.step_size,
                pkr.inner_results.inner_results.log_accept_ratio)

      kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
          tfp.mcmc.TransformedTransitionKernel(
              inner_kernel=tfp.mcmc.NoUTurnSampler(
                  target_log_prob_fn=log_prob,
                  step_size=step_size0,
                  parallel_iterations=1,
                  seed=strm()),
              bijector=unconstraining_bijectors),
          target_accept_prob=.8,
          num_adaptation_steps=burnin,
          step_size_setter_fn=lambda pkr, new_step_size: pkr._replace(  # pylint: disable=g-long-lambda
              inner_results=pkr.inner_results._replace(step_size=new_step_size)
          ),
          step_size_getter_fn=lambda pkr: pkr.inner_results.step_size,
          log_accept_prob_getter_fn=(
              lambda pkr: pkr.inner_results.log_accept_ratio),
      )

      # Sample from the chain and collect diagnostics.
      mcmc_trace, (step_size, log_accept_ratio) = tfp.mcmc.sample_chain(
          num_results=number_of_steps,
          num_burnin_steps=burnin,
          current_state=[b0, b1, df],
          kernel=kernel,
          trace_fn=trace_fn,
          parallel_iterations=1)
      rhat = tfp.mcmc.potential_scale_reduction(mcmc_trace)
      return (
          [s[-1] for s in step_size],  # final step size
          tf.math.exp(tfp.math.reduce_logmeanexp(log_accept_ratio)),
          [tf.reduce_mean(rhat_) for rhat_ in rhat],  # average rhat
      )

    # Sample from the posterior distribution and get diagnostics.
    [
        final_step_size, average_accept_ratio, average_rhat
    ] = self.evaluate(run_chain_and_get_diagnostic())

    # Check that step size adaptation reduced the initial step size
    self.assertAllLess(
        np.asarray(final_step_size) - np.asarray(init_step_size), 0.)
    # Check that average acceptance ratio is close to target
    self.assertAllClose(
        average_accept_ratio,
        .8 * np.ones_like(average_accept_ratio),
        atol=0.1, rtol=0.1)
    # Check that MCMC sample quality is acceptable after tuning.
    self.assertAllClose(
        average_rhat, np.ones_like(average_rhat), atol=0.05, rtol=0.05)