Example #1
        def loop_body(should_continue, k, seed):
            """Resample the non-accepted points."""
            u_seed, next_seed = samplers.split_seed(seed)
            # The range of U is chosen so that the resulting sample K lies in
            # [0, tf.int64.max). The final sample, if accepted, is K + 1.
            u = samplers.uniform(shape,
                                 minval=minval_u,
                                 maxval=maxval_u,
                                 dtype=power.dtype,
                                 seed=u_seed)

            # Sample the point X from the continuous density h(x) \propto x^(-power).
            x = self._hat_integral_inverse(u, power=power)

            # Rejection-inversion requires a `hat` function, h(x) such that
            # \int_{k - .5}^{k + .5} h(x) dx >= pmf(k + 1) for points k in the
            # support. A natural hat function for us is h(x) = x^(-power).
            #
            # After sampling X from h(x), suppose it lies in the interval
            # (K - .5, K + .5) for integer K. Then the corresponding K is accepted if
            # it lies to the left of x_K, where x_K is defined by:
            #   \int_{x_K}^{K + .5} h(x) dx = H(x_K) - H(K + .5) = pmf(K + 1),
            # where H(x) = \int_x^inf h(t) dt.

            # Solving for x_K, we find that x_K = H_inverse(H(K + .5) + pmf(K + 1)).
            # Or, the acceptance condition is X <= H_inverse(H(K + .5) + pmf(K + 1)).
            # Since X = H_inverse(U), this simplifies to U <= H(K + .5) + pmf(K + 1).

            # Update the non-accepted points.
            # Since X \in (K - .5, K + .5), the sample K is chosen as floor(X + 0.5).
            k = tf.where(should_continue, tf.floor(x + 0.5), k)
            accept = (u <= self._hat_integral(k + .5, power=power) +
                      tf.exp(self._log_prob(k + 1, power=power)))

            return [should_continue & (~accept), k, next_seed]
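
As a rough NumPy illustration of the hat machinery described in the comments above (a sketch, not the TFP implementation; hat_integral, hat_integral_inverse, and the unnormalized pmf k**(-power) are assumptions, valid for power > 1):

import numpy as np

def hat_integral(x, power):
    # H(x) = \int_x^inf t**(-power) dt, for power > 1.
    return x ** (1. - power) / (power - 1.)

def hat_integral_inverse(u, power):
    # Inverse of H, so hat_integral(hat_integral_inverse(u, p), p) == u.
    return (u * (power - 1.)) ** (1. / (1. - power))

def accepts(u, k, power):
    # The acceptance rule U <= H(K + 0.5) + pmf(K + 1), with an unnormalized
    # pmf(k) = k**(-power) standing in for the class's normalized log-prob.
    return u <= hat_integral(k + 0.5, power) + (k + 1.) ** (-power)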
Example #2
 def _sample_n(self, n, seed=None):
   probs = self._probs_parameter_no_checks()
   cut_probs = self._cut_probs(probs)
   new_shape = tf.concat([[n], tf.shape(cut_probs)], axis=0)
   uniform = samplers.uniform(new_shape, seed=seed, dtype=cut_probs.dtype)
   sample = self._quantile(uniform, probs)
   return tf.cast(sample, self.dtype)
Example #3
 def _body(found, seed, left, right, x_next):
     """Iterates until every chain has found a suitable next state."""
     proportions_seed, next_seed = samplers.split_seed(seed)
     proportions = samplers.uniform(x_initial_shape,
                                    dtype=x_initial_dtype,
                                    seed=proportions_seed)
     x_proposed = tf.where(~found, left + proportions * (right - left),
                           x_next)
     accept_res = _test_acceptance(x_initial,
                                   target_log_prob=target_log_prob,
                                   decided=found,
                                   log_slice_heights=log_slice_heights,
                                   x_proposed=x_proposed,
                                   step_size=step_size,
                                   lower_bounds=left,
                                   upper_bounds=right)
     boundary_test = log_slice_heights < target_log_prob(x_proposed)
     can_accept = boundary_test & accept_res
     next_found = found | can_accept
      # Note that it might seem we are moving the left and right end points
      # even for chains whose proposal has already been accepted (contrary to
      # the algorithm as stated in Neal). This is harmless: the endpoints of
      # chains that have already been accepted are never used again.
     next_left = tf.where(x_proposed < x_initial, x_proposed, left)
     next_right = tf.where(x_proposed >= x_initial, x_proposed, right)
     return (next_found, next_seed, next_left, next_right, x_proposed)
Example #4
    def _build_test_model(self,
                          num_timesteps=5,
                          num_features=2,
                          batch_shape=(),
                          missing_prob=0,
                          true_noise_scale=0.1,
                          true_level_scale=0.04,
                          true_slope_scale=0.02,
                          prior_class=tfd.InverseGamma,
                          dtype=tf.float32):
        seed = test_util.test_seed(sampler_type='stateless')
        (design_seed, weights_seed, noise_seed, level_seed, slope_seed,
         is_missing_seed) = samplers.split_seed(seed,
                                                6,
                                                salt='_build_test_model')

        design_matrix = samplers.normal([num_timesteps, num_features],
                                        dtype=dtype,
                                        seed=design_seed)
        weights = samplers.normal(list(batch_shape) + [num_features],
                                  dtype=dtype,
                                  seed=weights_seed)
        regression = tf.linalg.matvec(design_matrix, weights)
        noise = samplers.normal(list(batch_shape) + [num_timesteps],
                                dtype=dtype,
                                seed=noise_seed) * true_noise_scale

        level_residuals = samplers.normal(list(batch_shape) + [num_timesteps],
                                          dtype=dtype,
                                          seed=level_seed) * true_level_scale
        if true_slope_scale is not None:
            slope = tf.cumsum(
                samplers.normal(list(batch_shape) + [num_timesteps],
                                dtype=dtype,
                                seed=slope_seed) * true_slope_scale,
                axis=-1)
            level_residuals += slope
        level = tf.cumsum(level_residuals, axis=-1)
        time_series = (regression + noise + level)
        is_missing = samplers.uniform(list(batch_shape) + [num_timesteps],
                                      dtype=dtype,
                                      seed=is_missing_seed) < missing_prob

        model = gibbs_sampler.build_model_for_gibbs_fitting(
            observed_time_series=tfp.sts.MaskedTimeSeries(
                time_series[..., tf.newaxis], is_missing),
            design_matrix=design_matrix,
            weights_prior=tfd.Normal(loc=tf.cast(0., dtype),
                                     scale=tf.cast(10.0, dtype)),
            level_variance_prior=prior_class(concentration=tf.cast(
                0.01, dtype),
                                             scale=tf.cast(0.01 * 0.01,
                                                           dtype)),
            slope_variance_prior=None if true_slope_scale is None else
            prior_class(concentration=tf.cast(0.01, dtype),
                        scale=tf.cast(0.01 * 0.01, dtype)),
            observation_noise_variance_prior=prior_class(
                concentration=tf.cast(0.01, dtype),
                scale=tf.cast(0.01 * 0.01, dtype)))
        return model, time_series, is_missing
Example #5
def rademacher(shape, dtype=tf.float32, seed=None, name=None):
    """Generates `Tensor` consisting of `-1` or `+1`, chosen uniformly at random.

  For more details, see [Rademacher distribution](
  https://en.wikipedia.org/wiki/Rademacher_distribution).

  Args:
    shape: Vector-shaped, `int` `Tensor` representing shape of output.
    dtype: (Optional) TF `dtype` representing `dtype` of output.
    seed: (Optional) Python integer to seed the random number generator.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'rademacher').

  Returns:
    rademacher: `Tensor` with specified `shape` and `dtype` consisting of `-1`
      or `+1` chosen uniformly-at-random.
  """
    with tf.name_scope(name or 'rademacher'):
        # Choose the dtype to cause `2 * random_bernoulli - 1` to run in the same
        # memory (host or device) as the downstream cast will want to put it.  The
        # convention on GPU is that int32 are in host memory and int64 are in device
        # memory.
        shape = ps.convert_to_shape_tensor(shape)
        generation_dtype = tf.int64 if tf.as_dtype(
            dtype) != tf.int32 else tf.int32
        random_bernoulli = samplers.uniform(shape,
                                            minval=0,
                                            maxval=2,
                                            dtype=generation_dtype,
                                            seed=seed)
        return tf.cast(2 * random_bernoulli - 1, dtype)
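
A minimal NumPy sketch of the same construction (an illustration, not the TFP code): draw integers uniformly from {0, 1} and map them to {-1, +1} with 2*b - 1, exactly as the cast above does.

import numpy as np

rng = np.random.default_rng(0)
b = rng.integers(0, 2, size=(3, 4))   # uniform in {0, 1}, like minval=0, maxval=2
rademacher_draw = 2 * b - 1           # uniform in {-1, +1}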
Example #6
    def generate_and_test_samples(seed):
      """Generate and test samples."""
      v_seed, u_seed = samplers.split_seed(seed)

      x = samplers.normal(shape, dtype=internal_dtype, seed=v_seed)
      # This implicitly broadcasts concentration up to sample shape.
      v = 1 + c * x
      # In [1], there is an 'inner' rejection sampling loop which checks that
      # v > 0 and generates a new normal sample if it's not, saving the rest of
      # the computations below. We found that merging the check for  v > 0 with
      # the `good_sample_mask` not only simplifies the code, but leads to a
      # ~2x speedup for small concentrations on GPU, at the cost of deviating
      # slightly from the implementation given in Ref. [1].
      accept_v = v > 0.
      logv = tf.math.log1p(c * x)
      x2 = x * x
      v3 = v * v * v
      logv3 = logv * 3

      u = samplers.uniform(
          shape, dtype=internal_dtype, seed=u_seed)

      # In [1], the suggestion is to first check u < 1 - 0.331 * x2 * x2, and to
      # run the check below only if it fails, in order to avoid the relatively
      # expensive logarithm calls. Our algorithm operates in batch mode: we will
      # have to compute or not compute the logarithms for the entire batch, and
      # as the batch gets larger, the odds we compute it grow. Therefore we
      # don't bother with the "cheap" check.
      good_sample_mask = tf.logical_and(
          tf.math.log(u) < (x2 / 2. + d * (1 - v3 + logv3)), accept_v)

      return logv3 if log_space else v3, good_sample_mask
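
A small NumPy sketch of the acceptance test above (an illustration, not the TFP code), assuming the standard Marsaglia-Tsang constants d = concentration - 1/3 and c = 1/sqrt(9*d) with concentration >= 1; the scaling of the accepted v**3 by d, which happens outside the helper above, is included here to make the check self-contained:

import numpy as np

rng = np.random.default_rng(0)
concentration = 2.5
d = concentration - 1. / 3.
c = 1. / np.sqrt(9. * d)
x = rng.normal(size=100_000)
v = 1. + c * x
u = rng.uniform(size=100_000)
safe_v = np.where(v > 0., v, 1.)   # avoid taking the log of a non-positive v
accept = (v > 0.) & (np.log(u) < x ** 2 / 2. + d * (1. - safe_v ** 3 + 3. * np.log(safe_v)))
samples = d * v[accept] ** 3       # accepted draws ~ Gamma(concentration, rate=1)
print(samples.mean())              # roughly `concentration`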
Example #7
 def _sample_n(self, n, seed=None):
     low = tf.convert_to_tensor(self.low)
     high = tf.convert_to_tensor(self.high)
     shape = ps.concat(
         [[n], self._batch_shape_tensor(low=low, high=high)], 0)
     samples = samplers.uniform(shape=shape, dtype=self.dtype, seed=seed)
     return low + self._range(low=low, high=high) * samples
Example #8
def _sample_bates(total_count, low, high, n, seed=None):
  """Vectorized production of `Bates` samples.

  Args:
    total_count: (Batches of) counts of `Uniform`s to take means of.  Should
      have integer dtype and already be broadcasted to the batch shape.
    low: (Batches of) lower bounds of the `Uniform` variables to sample.  Should
      be the same floating dtype as `high` and broadcastable to the batch shape.
    high: (Batches of) upper bounds of the `Uniform` variables to sample. Should
      be the same floating dtype as `low` and broadcastable to the batch shape.
    n: `int32` number of samples to generate.
    seed: Random seed to pass to `Uniform` sampler.

  Returns:
    samples: Samples of (batches of) the `Bates` variable.  Will have same dtype
      as `low` and `high`. If the batch shape is `[B1,..., Bn]`, `samples` has
      shape `[n, B1,..., Bn]`.
  """

  # 1. Sample Uniform(0, 1)s, flattening the batch dimension into axis 0.
  uniform_sample_shape = tf.concat([[tf.reduce_sum(total_count)], [n]], axis=0)
  uniform_samples = samplers.uniform(
      uniform_sample_shape, minval=0., maxval=1., dtype=low.dtype, seed=seed)
  # 2. Produce segment means.
  segment_lengths = tf.reshape(total_count, [-1])
  segment_ids = tf.repeat(tf.range(tf.size(segment_lengths)), segment_lengths)
  flatmeans = tf.math.segment_mean(uniform_samples, segment_ids)
  # 3. Reshape and transpose segment means back to the original shape.
  outshape = tf.concat([tf.shape(total_count), [n]], axis=0)
  tmeans = tf.reshape(flatmeans, outshape)
  axes = tf.range(tf.rank(tmeans))
  means = tf.transpose(tmeans, tf.roll(axes, shift=1, axis=0))
  # 4. Shift/scale from (0, 1) to (low, high).
  return low + (high - low) * means
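
As a rough NumPy illustration of steps 1-4 above (a sketch, not the TFP code; the small total_count, low, high, and n values are assumptions):

import numpy as np

rng = np.random.default_rng(0)
total_count = np.array([2, 5, 3])      # (batch of) counts
low, high, n = 0.0, 1.0, 4
# 1. Sample sum(total_count) x n uniforms.
u = rng.uniform(size=(total_count.sum(), n))
# 2. Segment means: average contiguous row blocks of length total_count[i].
segment_ids = np.repeat(np.arange(total_count.size), total_count)
means = np.stack([u[segment_ids == i].mean(axis=0)
                  for i in range(total_count.size)])
# 3. Transpose so the sample dimension leads: shape [n, batch].
means = means.T
# 4. Shift/scale from (0, 1) to (low, high).
samples = low + (high - low) * means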
Example #9
def make_onehot_categorical(batch_shape, num_classes, dtype=tf.int32):
    logits = -50. + samplers.uniform(list(batch_shape) + [num_classes],
                                     -10,
                                     10,
                                     dtype=tf.float32,
                                     seed=test_util.test_seed())
    return tfd.OneHotCategorical(logits, dtype=dtype, validate_args=True)
Example #10
 def _sample_3d(self, n, mean_direction, concentration, seed=None):
     """Specialized inversion sampler for 3D."""
     u_shape = ps.concat(
         [[n],
          self._batch_shape_tensor(mean_direction=mean_direction,
                                   concentration=concentration)],
         axis=0)
     z = samplers.uniform(u_shape, seed=seed, dtype=self.dtype)
     # TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could
     # be bisected for bounded sampling runtime (i.e. not rejection sampling).
     # [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/
     # The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa
     # We must protect against both kappa and z being zero.
     safe_conc = tf.where(concentration > 0, concentration,
                          tf.ones_like(concentration))
     safe_z = tf.where(z > 0, z, tf.ones_like(z))
     safe_u = 1 + tf.reduce_logsumexp(
         [tf.math.log(safe_z),
          tf.math.log1p(-safe_z) - 2 * safe_conc],
         axis=0) / safe_conc
     # Limit of the above expression as kappa->0 is 2*z-1
     u = tf.where(concentration > 0., safe_u, 2 * z - 1)
     # Limit of the expression as z->0 is -1.
     u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u)
     if not self._allow_nan_stats:
         u = tf.debugging.check_numerics(u, 'u in _sample_3d')
     return u[..., tf.newaxis]
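
For reference, a tiny NumPy check (an illustration, not part of TFP) that the logsumexp form above matches the direct inversion formula from the comment; the kappa and z values are arbitrary:

import numpy as np

kappa, z = 2.0, 0.3
direct = 1. + np.log(z + (1. - z) * np.exp(-2. * kappa)) / kappa
stable = 1. + np.logaddexp(np.log(z), np.log1p(-z) - 2. * kappa) / kappa
print(np.allclose(direct, stable))   # True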
Example #11
def _random_gamma_cpu(
    shape, concentration, rate=None, log_rate=None, seed=None, log_space=False):
  """Sample using *fast* `tf.random.stateless_gamma`."""
  bad_concentration = (concentration <= 0.) | tf.math.is_nan(concentration)
  safe_concentration = tf.where(
      bad_concentration,
      dtype_util.as_numpy_dtype(concentration.dtype)(100.), concentration)

  if rate is None and log_rate is None:
    rate = tf.ones([], concentration.dtype)
    log_rate = tf.zeros([], concentration.dtype)

  if log_space:
    # The underlying gamma sampler uses a recurrence for conc < 1.  When
    # a ~ gamma(conc + 1) and x ~ uniform(0, 1), we have
    #   b = a * x ** (1/conc) ~ gamma(conc)
    # Given that we want log(b) anyway, it's more accurate to just ask the
    # sampler for a (by passing conc + 1 to it in the first place) and
    # do the correction in log-space below.
    orig_safe_concentration = safe_concentration
    safe_concentration = tf.where(
        orig_safe_concentration < 1,
        orig_safe_concentration + 1.,
        orig_safe_concentration)
    seed, conc_fix_seed = samplers.split_seed(seed)
    log_rate = tf.math.log(rate) if log_rate is None else log_rate
    rate = tf.ones_like(log_rate)  # Do the division later in log-space.

  if rate is None:
    rate = tf.math.exp(log_rate)

  bad_rate = (rate <= 0.) | tf.math.is_nan(rate)
  safe_rate = tf.where(
      bad_rate,
      dtype_util.as_numpy_dtype(concentration.dtype)(100.), rate)
  samples = tf.random.stateless_gamma(
      shape=shape, seed=seed, alpha=safe_concentration,
      beta=safe_rate, dtype=concentration.dtype)

  if log_space:
    # Apply the concentration < 1 recurrence here, in log-space.
    samples = tf.math.log(samples)
    conc_fix_unif = samplers.uniform(  # in [0, 1)
        shape, dtype=samples.dtype, seed=conc_fix_seed)

    conc_lt_one_fix = tf.where(
        orig_safe_concentration < 1,
        # Why do we use log1p(-x)? x is in [0, 1), so log(x) can be -inf at
        # the boundary. Since x ~ U(0, 1) implies 1 - x ~ U(0, 1), and 1 - x
        # lies in (0, 1], we can safely take the log of a Uniform(0, 1) draw
        # as log1p(-x): log1p(-0) = 0 and log1p(-almost_one) is finite.
        tf.math.log1p(-conc_fix_unif) / orig_safe_concentration,
        tf.zeros((), dtype=samples.dtype))
    samples += (conc_lt_one_fix - log_rate)

  return tf.where(
      bad_rate | bad_concentration,
      dtype_util.as_numpy_dtype(concentration.dtype)(np.nan), samples)
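
A minimal NumPy sketch of the concentration < 1 recurrence used above, in log-space (an illustration, not the TFP code): if a ~ Gamma(conc + 1) and x ~ Uniform(0, 1), then a * x**(1/conc) ~ Gamma(conc), with log1p(-u) standing in for log(x) to avoid log(0):

import numpy as np

rng = np.random.default_rng(0)
conc = 0.3
a = rng.gamma(shape=conc + 1., size=100_000)   # draws at the boosted concentration
u = rng.uniform(size=100_000)
log_b = np.log(a) + np.log1p(-u) / conc        # log of Gamma(conc, rate=1) draws
print(np.exp(log_b).mean())                    # roughly conc == 0.3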
Example #12
  def loop_body(done, u, w, seed):
    """Resample the non-accepted points."""
    # We completely resample u on each iteration. Only its sign is used
    # outside the loop, and that sign remains uniformly random.
    u_seed, v_seed, next_seed = samplers.split_seed(seed, n=3)
    u = samplers.uniform(
        shape, minval=-1., maxval=1., dtype=concentration.dtype, seed=u_seed)
    z = tf.cos(np.pi * u)
    # Update the non-accepted points.
    w = tf.where(done, w, (1. + s * z) / (s + z))
    y = concentration * (s - w)

    v = samplers.uniform(
        shape, minval=0., maxval=1., dtype=concentration.dtype, seed=v_seed)
    accept = (y * (2. - y) >= v) | (tf.math.log(y / v) + 1. >= y)

    return done | accept, u, w, next_seed
Example #13
 def body(unused_keep_going, geom_sum, num_geom, seed):
     u_seed, next_seed = samplers.split_seed(seed)
     u = samplers.uniform(full_shape, seed=u_seed, dtype=counts.dtype)
     geom = tf.math.ceil(tf.math.log(u) / log1minusprob)
     geom_sum += geom
     keep_going = (geom_sum <= counts)
     num_geom = tf.where(keep_going, num_geom + 1, num_geom)
     return tf.reduce_any(keep_going), geom_sum, num_geom, next_seed
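
The `geom` line above is the standard inversion formula for a geometric variable; a minimal NumPy sketch with an assumed success probability `prob` (log1minusprob corresponds to log1p(-prob)):

import numpy as np

rng = np.random.default_rng(0)
prob = 0.3
u = rng.uniform(size=100_000)
geom = np.ceil(np.log(u) / np.log1p(-prob))   # ~ Geometric(prob) on {1, 2, ...}
print(geom.mean())                            # roughly 1 / prob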
Example #14
  def _sample_n(self, n, seed=None):
    loc, scale, low, high = self._loc_scale_low_high()
    batch_shape = self._batch_shape_tensor(
        loc=loc, scale=scale, low=low, high=high)
    sample_and_batch_shape = ps.concat([[n], batch_shape], axis=0)

    u = samplers.uniform(sample_and_batch_shape, dtype=self.dtype, seed=seed)
    return self._quantile(u, loc=loc, scale=scale, low=low, high=high)
Example #15
 def proposal_fn(seed):
     # Test static and dynamic shape of proposed samples.
     uniform_samples = self.maybe_static(
         samplers.uniform([samples_per_distribution, 2],
                          seed=seed,
                          dtype=dtype), is_static)
     return uniform_samples, tf.ones_like(
         uniform_samples) * upper_bounds
Example #16
 def _sample_n(self, n, seed=None):
   loc = tf.convert_to_tensor(self.loc)
   scale = tf.convert_to_tensor(self.scale)
   batch_shape = self._batch_shape_tensor(loc=loc, scale=scale)
   shape = ps.concat([[n], batch_shape], 0)
   probs = samplers.uniform(
       shape=shape, minval=0., maxval=1., dtype=self.dtype, seed=seed)
   return self._quantile(probs, loc=loc, scale=scale)
Example #17
 def uniform_in_circle(seed):
     coords = samplers.uniform([6, 2],
                               minval=-1.0,
                               maxval=1.0,
                               seed=seed)
     radii = tf.reduce_sum(coords * coords, axis=-1)
     good = tf.less(radii, 1)
     return (coords, good)
Example #18
    def test_normal_cdf_gradients(self):
        dist = tfd.Normal(loc=3., scale=2.)
        bij = tfbe.ScalarFunctionWithInferredInverse(dist.cdf)

        ys = self.evaluate(samplers.uniform([100], seed=test_util.test_seed()))
        xs_true, grad_true = tfp.math.value_and_gradient(dist.quantile, ys)
        xs_numeric, grad_numeric = tfp.math.value_and_gradient(bij.inverse, ys)
        self.assertAllClose(xs_true, xs_numeric, atol=1e-4)
        self.assertAllClose(grad_true, grad_numeric, rtol=1e-4)
Example #19
 def loop_body(should_continue, samples, prod, num_iters, seed):
   u_seed, next_seed = samplers.split_seed(seed)
   prod = prod * samplers.uniform(
       sample_shape, dtype=internal_dtype, seed=u_seed)
   accept = should_continue & (prod <= exp_neg_rate)
   samples = tf.where(accept, num_iters, samples)
   return [
       should_continue & (~accept), samples, prod, num_iters + 1, next_seed
   ]
Example #20
 def _sample_n(self, n, seed=None):
     with tf.name_scope("sample_n"):
         low = tf.convert_to_tensor(self.low)
         high = tf.convert_to_tensor(self.high)
         shape = tf.concat([[n], self._batch_shape_tensor(low=low, high=high)], 0)
         samples = samplers.uniform(shape=shape, dtype=tf.float32, seed=seed)
         return low + tf.cast(
             tf.cast(self._range(low=low, high=high), tf.float32) * samples,
             self.dtype,
         )
Example #21
    def test_sampled_latents_have_correct_marginals(self, use_slope):
        seed = test_util.test_seed(sampler_type='stateless')
        residuals_seed, is_missing_seed, level_seed = samplers.split_seed(
            seed, 3, 'test_sampled_level_has_correct_marginals')

        num_timesteps = 10

        observed_residuals = samplers.normal([3, 1, num_timesteps],
                                             seed=residuals_seed)
        is_missing = samplers.uniform([3, 1, num_timesteps],
                                      seed=is_missing_seed) > 0.8
        level_scale = 1.5 * tf.ones([3, 1])
        observation_noise_scale = 0.2 * tf.ones([3, 1])

        if use_slope:
            initial_state_prior = tfd.MultivariateNormalDiag(
                loc=[-30., 2.], scale_diag=[1., 0.2])
            slope_scale = 0.5 * tf.ones([3, 1])
            ssm = tfp.sts.LocalLinearTrendStateSpaceModel(
                num_timesteps=num_timesteps,
                initial_state_prior=initial_state_prior,
                observation_noise_scale=observation_noise_scale,
                level_scale=level_scale,
                slope_scale=slope_scale)
        else:
            initial_state_prior = tfd.MultivariateNormalDiag(loc=[-30.],
                                                             scale_diag=[100.])
            slope_scale = None
            ssm = tfp.sts.LocalLevelStateSpaceModel(
                num_timesteps=num_timesteps,
                initial_state_prior=initial_state_prior,
                observation_noise_scale=observation_noise_scale,
                level_scale=level_scale)

        posterior_means, posterior_covs = ssm.posterior_marginals(
            observed_residuals[..., tf.newaxis], mask=is_missing)
        latents_samples = gibbs_sampler._resample_latents(
            observed_residuals=observed_residuals,
            level_scale=level_scale,
            slope_scale=slope_scale,
            observation_noise_scale=observation_noise_scale,
            initial_state_prior=initial_state_prior,
            is_missing=is_missing,
            sample_shape=10000,
            seed=level_seed)

        (posterior_means_, posterior_covs_, latents_means_,
         latents_covs_) = self.evaluate((posterior_means, posterior_covs,
                                         tf.reduce_mean(latents_samples,
                                                        axis=0),
                                         tfp.stats.covariance(latents_samples,
                                                              sample_axis=0,
                                                              event_axis=-1)))
        self.assertAllClose(latents_means_, posterior_means_, atol=0.1)
        self.assertAllClose(latents_covs_, posterior_covs_, atol=0.1)
Example #22
 def testLaplaceQuantile(self):
     qs = self.evaluate(
         tf.concat(
             [[0., 1],
              samplers.uniform(
                  [10], minval=.1, maxval=.9, seed=test_util.test_seed())],
             axis=0))
     d = tfd.Laplace(loc=1., scale=1.3, validate_args=True)
     vals = d.quantile(qs)
     self.assertAllClose([-np.inf, np.inf], vals[:2])
     self.assertAllClose(qs[2:], d.cdf(vals[2:]))
Example #23
 def _sample_n(self, n, seed=None):
     loc = tf.convert_to_tensor(self.loc)
     scale = tf.convert_to_tensor(self.scale)
     shape = ps.concat(
         [[n], self._batch_shape_tensor(loc=loc, scale=scale)], 0)
     probs = samplers.uniform(shape,
                              minval=0.,
                              maxval=1.,
                              dtype=self.dtype,
                              seed=seed)
     # Quantile function.
     return loc + scale * tf.tan((np.pi / 2) * probs)
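
The tangent expression above is an inverse-CDF transform consistent with a half-Cauchy-type distribution, F(x) = (2/pi) * arctan((x - loc) / scale); a quick NumPy round-trip check (an illustration, with arbitrary loc and scale):

import numpy as np

loc, scale = 1.0, 1.3
p = np.linspace(0.05, 0.95, 5)
x = loc + scale * np.tan((np.pi / 2) * p)             # quantile, as in the code above
p_back = (2. / np.pi) * np.arctan((x - loc) / scale)  # CDF round-trip
print(np.allclose(p, p_back))                         # True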
Example #24
 def _sample_n(self, n, seed=None):
   samples = tf.convert_to_tensor(self._samples)
   indices = samplers.uniform([n], maxval=self._compute_num_samples(samples),
                              dtype=tf.int32, seed=seed)
   draws = tf.gather(samples, indices, axis=self._samples_axis)
   axes = ps.concat(
       [[self._samples_axis],
        ps.range(self._samples_axis, dtype=tf.int32),
        ps.range(self._event_ndims, dtype=tf.int32) + self._samples_axis + 1],
       axis=0)
   draws = tf.transpose(a=draws, perm=axes)
   return draws
Example #25
 def randomized_computation(seed):
   """Internal randomized computation."""
   proposal_seed, mask_seed = samplers.split_seed(
       seed, salt='batched_rejection_sampler')
   proposed_samples, proposed_values = proposal_fn(proposal_seed)
   good_samples_mask = tf.less_equal(
       proposed_values * samplers.uniform(
           prefer_static.shape(proposed_samples),
           seed=mask_seed,
           dtype=dtype),
       target_fn(proposed_samples))
   return proposed_samples, good_samples_mask
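
A minimal NumPy sketch of the acceptance rule above, detached from TFP: with proposals x, an envelope value m(x) returned alongside them, and a target density t(x) <= m(x), accept x whenever u * m(x) <= t(x) for u ~ Uniform(0, 1). The semicircle target here is an assumed example.

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1., 1., size=100_000)   # proposed samples
envelope = np.ones_like(x)               # m(x) = 1 dominates the target below
target = np.sqrt(1. - x ** 2)            # unnormalized semicircle density
u = rng.uniform(size=x.shape)
accepted = x[u * envelope <= target]     # kept draws follow the semicircle law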
Example #26
def _setup_mcmc(model, n_chains, seed, **pins):
    """Construct bijector and transforms needed for windowed MCMC.

  This pins the initial model, constructs a bijector that unconstrains and
  flattens each dimension and adds a leading batch shape of `n_chains`,
  initializes a point in the unconstrained space, and constructs a transformed
  log probability using the bijector.

  Note that we must manually construct this target log probability instead of
  using a transformed transition kernel, because the TTK assumes the input
  shape is the same as the output shape.

  Args:
    model: `tfd.JointDistribution`
      The model to sample from.
    n_chains: int
      Number of chains (independent examples) to run.
    seed: A seed for reproducible sampling.
    **pins:
      Values passed to `model.experimental_pin`.


  Returns:
    target_log_prob_fn: Callable on the transformed space.
    initial_transformed_position: `tf.Tensor`, sampled from a uniform (-2, 2).
    bijector: `tfb.Bijector` instance, which unconstrains and flattens.
  """
    pinned_model = model.experimental_pin(**pins)
    bijector = _get_flat_unconstraining_bijector(pinned_model)
    initial_position = pinned_model.sample_unpinned(n_chains)
    initial_transformed_position = bijector.forward(initial_position)

    # Jitter init
    seeds = samplers.split_seed(seed, n=len(initial_transformed_position))
    unconstrained_unif_init_position = []
    for p, seed in zip(initial_transformed_position, seeds):
        unconstrained_unif_init_position.append(
            samplers.uniform(ps.shape(p),
                             minval=-2.,
                             maxval=2.,
                             seed=seed,
                             dtype=p.dtype))

    # pylint: disable=g-long-lambda
    def target_log_prob_fn(*args):
        return (
            pinned_model.unnormalized_log_prob(bijector.inverse(args)) +
            bijector.inverse_log_det_jacobian(
                args, event_ndims=[1 for _ in initial_transformed_position]))

    # pylint: enable=g-long-lambda
    return target_log_prob_fn, unconstrained_unif_init_position, bijector
Example #27
  def generate_and_test_samples(seed):
    """Generate and test samples."""
    u_seed, v_seed = samplers.split_seed(seed)

    u = samplers.uniform(sample_shape, dtype=internal_dtype, seed=u_seed)
    u = u - 0.5
    u_shifted = 0.5 - tf.math.abs(u)

    v = samplers.uniform(sample_shape, dtype=internal_dtype, seed=v_seed)

    k = tf.math.floor(((2. * a) / u_shifted + b) * u + rate + 0.43)

    good_sample_mask = (u_shifted >= 0.07) & (v <= 0.9277 - 3.6224 / (b - 2.))

    s = tf.math.log(v * inverse_alpha / (a / tf.math.square(u_shifted) + b))
    t = -rate + k * log_rate - tf.math.lgamma(k + 1)

    good_sample_mask = good_sample_mask | (s <= t)
    # Make sure the sample is within bounds.
    good_sample_mask = good_sample_mask & (k >= 0) & ((u_shifted >= 0.013) |
                                                      (v <= u_shifted))
    return k, good_sample_mask
Example #28
  def _random_regression_task(self, num_outputs, num_features, batch_shape=(),
                              weights=None, observation_noise_scale=0.1,
                              seed=None):
    design_seed, weights_seed, noise_seed = samplers.split_seed(seed, n=3)
    batch_shape = list(batch_shape)

    design_matrix = samplers.uniform(batch_shape + [num_outputs, num_features],
                                     seed=design_seed)
    if weights is None:
      weights = samplers.normal(batch_shape + [num_features], seed=weights_seed)
    targets = (tf.linalg.matvec(design_matrix, weights) +
               observation_noise_scale * samplers.normal(
                   batch_shape + [num_outputs], seed=noise_seed))
    return design_matrix, weights, targets
Example #29
 def _sample_n(self, n, seed=None):
     concentration = tf.convert_to_tensor(self.concentration)
     scale = tf.convert_to_tensor(self.scale)
     shape = ps.concat([[n],
                        self._batch_shape_tensor(
                            concentration=concentration, scale=scale)],
                       axis=0)
     sampled = samplers.uniform(shape,
                                maxval=1.,
                                seed=seed,
                                dtype=self.dtype)
     log_sample = tf.math.log(
         scale) - tf.math.log1p(-sampled) / concentration
     return tf.exp(log_sample)
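
The log-space expression above amounts to inverse-CDF sampling for a Pareto-type law; a quick NumPy round-trip check, assuming the CDF F(x) = 1 - (scale / x)**concentration for x >= scale:

import numpy as np

concentration, scale = 3.0, 2.0
u = np.linspace(0.05, 0.95, 5)
x = np.exp(np.log(scale) - np.log1p(-u) / concentration)  # the sampler's transform
u_back = 1. - (scale / x) ** concentration                # CDF round-trip
print(np.allclose(u, u_back))                             # True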
Example #30
 def test_chandrupatla_max_iterations(self):
   expected_roots = samplers.normal(
       [4, 3], seed=test_util.test_seed(sampler_type='stateless'))
   max_iterations = samplers.uniform(
       [4, 3], minval=1, maxval=6, dtype=tf.int32,
       seed=test_util.test_seed(sampler_type='stateless'))
   _, _, num_iterations = tfp.math.find_root_chandrupatla(
       objective_fn=lambda x: (x - expected_roots)**3,
       low=-1000000.,
       high=1000000.,
       position_tolerance=1e-8,
       max_iterations=max_iterations)
   self.assertAllClose(num_iterations,
                       max_iterations)