Example #1
def run_hmc():
    # Nested helper: `mcmc`, `model`, and the sampler settings below come from
    # the enclosing scope (compare the same call inside `fit_with_hmc`, Example #3).
    return mcmc.sample_chain(
        num_results=num_results,
        current_state=initial_state,
        num_burnin_steps=num_warmup_steps,
        kernel=mcmc.SimpleStepSizeAdaptation(
            inner_kernel=mcmc.TransformedTransitionKernel(
                inner_kernel=mcmc.HamiltonianMonteCarlo(
                    target_log_prob_fn=target_log_prob_fn,
                    step_size=initial_step_size,
                    num_leapfrog_steps=num_leapfrog_steps,
                    state_gradients_are_stopped=True,
                    seed=seed()),
                bijector=[
                    param.bijector for param in model.parameters
                ]),
            num_adaptation_steps=int(num_warmup_steps * 0.8),
            adaptation_rate=tf.convert_to_tensor(
                value=0.1, dtype=initial_state[0].dtype)),
        parallel_iterations=1 if seed is not None else 10)
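The fragment above relies on names from its enclosing function. For a runnable illustration of the same kernel stack (step-size adaptation around a `TransformedTransitionKernel` around HMC), here is a minimal sketch against a toy positive-valued target, assuming TF2 eager execution; the target, bijector, and settings are illustrative choices, not part of the original code.

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors
tfd = tfp.distributions
mcmc = tfp.mcmc

# Toy target constrained to positive values.
target = tfd.Gamma(concentration=2., rate=3.)

num_results = 500
num_warmup_steps = 200

kernel = mcmc.SimpleStepSizeAdaptation(
    inner_kernel=mcmc.TransformedTransitionKernel(
        inner_kernel=mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target.log_prob,
            step_size=0.1,
            num_leapfrog_steps=3),
        # Sample in unconstrained space; Softplus maps back to (0, inf).
        bijector=[tfb.Softplus()]),
    num_adaptation_steps=int(num_warmup_steps * 0.8))

samples, kernel_results = mcmc.sample_chain(
    num_results=num_results,
    num_burnin_steps=num_warmup_steps,
    current_state=[tf.ones([])],
    kernel=kernel)

# With this nesting, the Metropolis accept flags sit two levels down.
accept_rate = tf.reduce_mean(tf.cast(
    kernel_results.inner_results.inner_results.is_accepted, tf.float32))
```

Keeping `SimpleStepSizeAdaptation` outermost, so that adaptation sees the unconstrained-space kernel, matches the ordering used by `fit_with_hmc` in Example #3.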
Example #2
def hmc(target, model, model_config, step_size_init, initial_states, reparam):
    """Runs HMC to sample from the given target distribution."""
    if reparam == 'CP':
        to_centered = lambda x: x
    elif reparam == 'NCP':
        to_centered = model_config.to_centered
    else:
        to_centered = model_config.make_to_centered(**reparam)

    model_config = model_config._replace(to_centered=to_centered)

    initial_states = list(initial_states)  # Variational samples.
    shapes = [s[0].shape for s in initial_states]

    vectorized_target = vectorize_log_joint_fn(target)

    # Broadcast each variable's initial step size to its state's shape and
    # scale it down by (num_leapfrog_steps / 4)**2.
    per_chain_initial_step_sizes = [
        np.array(step_size_init[i] * np.ones(initial_states[i].shape) /
                 (FLAGS.num_leapfrog_steps / 4.)**2).astype(np.float32)
        for i in range(len(step_size_init))
    ]

    kernel = mcmc.SimpleStepSizeAdaptation(
        inner_kernel=mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=vectorized_target,
            step_size=per_chain_initial_step_sizes,
            num_leapfrog_steps=FLAGS.num_leapfrog_steps),
        adaptation_rate=0.05,
        num_adaptation_steps=FLAGS.num_adaptation_steps)

    states_orig, kernel_results = mcmc.sample_chain(
        num_results=FLAGS.num_samples,
        num_burnin_steps=FLAGS.num_burnin_steps,
        current_state=initial_states,
        kernel=kernel,
        num_steps_between_results=1)

    states_transformed = transform_mcmc_states(states_orig, to_centered)
    ess = tfp.mcmc.effective_sample_size(states_transformed)

    return states_orig, kernel_results, states_transformed, ess
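`vectorize_log_joint_fn`, `transform_mcmc_states`, and `FLAGS` are project-specific pieces that are not shown here. As a hedged usage sketch (assuming those helpers plus a `target`, `model`, `model_config`, per-variable step sizes, and initial states are already in scope, and eager execution), the values returned by `hmc` can be inspected as follows; the attribute path into `kernel_results` follows the `SimpleStepSizeAdaptation`-over-HMC nesting built above.

```python
import numpy as np

states_orig, kernel_results, states_transformed, ess = hmc(
    target, model, model_config,
    step_size_init=step_size_init,    # one initial step size per latent variable
    initial_states=initial_states,    # e.g. variational samples, one Tensor per variable
    reparam='CP')                     # centered parameterization; identity transform

# SimpleStepSizeAdaptation wraps HamiltonianMonteCarlo, so the accept flags are
# one level down in the traced kernel results.
accept_rate = np.mean(kernel_results.inner_results.is_accepted)

# `ess` holds one effective-sample-size estimate per (centered) latent variable.
mean_ess = [float(np.mean(e)) for e in ess]
print('acceptance rate:', accept_rate, 'mean ESS per variable:', mean_ess)
```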
Example #3
def fit_with_hmc(model,
                 observed_time_series,
                 num_results=100,
                 num_warmup_steps=50,
                 num_leapfrog_steps=15,
                 initial_state=None,
                 initial_step_size=None,
                 chain_batch_shape=(),
                 num_variational_steps=150,
                 variational_optimizer=None,
                 seed=None,
                 name=None):
    """Draw posterior samples using Hamiltonian Monte Carlo (HMC).

  Markov chain Monte Carlo (MCMC) methods are considered the gold standard of
  Bayesian inference; under suitable conditions and in the limit of infinitely
  many draws they generate samples from the true posterior distribution. HMC [1]
  uses gradients of the model's log-density function to propose samples,
  allowing it to exploit posterior geometry. However, it is computationally more
  expensive than variational inference and relatively sensitive to tuning.

  This method attempts to provide a sensible default approach for fitting
  StructuralTimeSeries models using HMC. It first runs variational inference as
  a fast posterior approximation, and initializes the HMC sampler from the
  variational posterior, using the posterior standard deviations to set
  per-variable step sizes (equivalently, a diagonal mass matrix). During the
  warmup phase, it adapts the step size to target an acceptance rate of 0.75,
  which is thought to be in the desirable range for optimal mixing [2].

  Args:
    model: An instance of `StructuralTimeSeries` representing a
      time-series model. This represents a joint distribution over
      time-series and their parameters with batch shape `[b1, ..., bN]`.
    observed_time_series: `float` `Tensor` of shape
      `concat([sample_shape, model.batch_shape, [num_timesteps, 1]])` where
      `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
      dimension may (optionally) be omitted if `num_timesteps > 1`. May
      optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
      a mask `Tensor` to specify timesteps with missing observations.
    num_results: Integer number of Markov chain draws.
      Default value: `100`.
    num_warmup_steps: Integer number of steps to take before starting to
      collect results. The warmup steps are also used to adapt the step size
      towards a target acceptance rate of 0.75.
      Default value: `50`.
    num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
      for. Total progress per HMC step is roughly proportional to
      `step_size * num_leapfrog_steps`.
      Default value: `15`.
    initial_state: Optional Python `list` of `Tensor`s, one for each model
      parameter, representing the initial state(s) of the Markov chain(s). These
      should have shape `concat([chain_batch_shape, param.prior.batch_shape,
      param.prior.event_shape])`. If `None`, the initial state is set
      automatically using a sample from a variational posterior.
      Default value: `None`.
    initial_step_size: Python `list` of `Tensor`s, one for each model parameter,
      representing the step size for the leapfrog integrator. Must
      broadcast with the shape of `initial_state`. Larger step sizes lead to
      faster progress, but too-large step sizes make rejection exponentially
      more likely. If `None`, the step size is set automatically using the
      standard deviation of a variational posterior.
      Default value: `None`.
    chain_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of chains
      to run in parallel.
      Default value: `[]` (i.e., a single chain).
    num_variational_steps: Python `int` number of steps to run the variational
      optimization to determine the initial state and step sizes.
      Default value: `150`.
    variational_optimizer: Optional `tf.train.Optimizer` instance to use in
      the variational optimization. If `None`, defaults to
      `tf.train.AdamOptimizer(0.1)`.
      Default value: `None`.
    seed: Python integer to seed the random number generator.
    name: Python `str` name prefixed to ops created by this function.
      Default value: `None` (i.e., 'fit_with_hmc').

  Returns:
    samples: Python `list` of `Tensors` representing posterior samples of model
      parameters, with shapes `[concat([[num_results], chain_batch_shape,
      param.prior.batch_shape, param.prior.event_shape]) for param in
      model.parameters]`.
    kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of
      `Tensor`s representing internal calculations made within the HMC sampler.

  #### Examples

  Assume we've built a structural time-series model:

  ```python
    day_of_week = tfp.sts.Seasonal(
        num_seasons=7,
        observed_time_series=observed_time_series,
        name='day_of_week')
    local_linear_trend = tfp.sts.LocalLinearTrend(
        observed_time_series=observed_time_series,
        name='local_linear_trend')
    model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
                        observed_time_series=observed_time_series)
  ```

  To draw posterior samples using HMC under default settings:

  ```python
  samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    samples_, kernel_results_ = sess.run((samples, kernel_results))

  print("acceptance rate: {}".format(
    np.mean(kernel_results_.inner_results.inner_results.is_accepted, axis=0)))
  print("posterior means: {}".format(
    {param.name: np.mean(param_draws, axis=0)
     for (param, param_draws) in zip(model.parameters, samples_)}))
  ```

  We can also run multiple chains. This may help diagnose convergence issues
  and allows us to exploit vectorization to draw samples more quickly, although
  warmup still requires the same number of sequential steps.

  ```python
  from matplotlib import pylab as plt

  samples, kernel_results = tfp.sts.fit_with_hmc(
    model, observed_time_series, chain_batch_shape=[10])

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    samples_, kernel_results_ = sess.run((samples, kernel_results))

  print("acceptance rate: {}".format(
    np.mean(kernel_results_.inner_results.inner_results.is_accepted, axis=0)))

  # Plot the sampled traces for each parameter. If the chains have mixed, their
  # traces should all cover the same region of state space, frequently crossing
  # over each other.
  for (param, param_draws) in zip(model.parameters, samples_):
    if param.prior.event_shape.ndims > 0:
      print("Only plotting traces for scalar parameters, skipping {}".format(
        param.name))
      continue
    plt.figure(figsize=[10, 4])
    plt.title(param.name)
    plt.plot(param_draws)
    plt.ylabel(param.name)
    plt.xlabel("HMC step")

  # Combining the samples from multiple chains into a single dimension allows
  # us to easily pass sampled parameters to downstream forecasting methods.
  combined_samples_ = [np.reshape(param_draws,
                                  [-1] + list(param_draws.shape[2:]))
                       for param_draws in samples_]
  ```

  For greater flexibility, you may prefer to implement your own sampler using
  the TensorFlow Probability primitives in `tfp.mcmc`. The following recipe
  constructs a basic HMC sampler, using a `TransformedTransitionKernel` to
  incorporate constraints on the parameter space.

  ```python
  transformed_hmc_kernel = mcmc.TransformedTransitionKernel(
      inner_kernel=mcmc.SimpleStepSizeAdaptation(
          inner_kernel=mcmc.HamiltonianMonteCarlo(
              target_log_prob_fn=model.joint_log_prob(observed_time_series),
              step_size=step_size,
              num_leapfrog_steps=num_leapfrog_steps,
              state_gradients_are_stopped=True,
              seed=seed),
          num_adaptation_steps=int(0.8 * num_warmup_steps)),
      bijector=[param.bijector for param in model.parameters])

  # Initialize from a Uniform[-2, 2] distribution in unconstrained space.
  initial_state = [tfp.sts.sample_uniform_initial_state(
    param, return_constrained=True) for param in model.parameters]

  samples, kernel_results = tfp.mcmc.sample_chain(
    kernel=transformed_hmc_kernel,
    num_results=num_results,
    current_state=initial_state,
    num_burnin_steps=num_warmup_steps)
  ```

  #### References

  [1]: Radford Neal. MCMC Using Hamiltonian Dynamics. _Handbook of Markov Chain
       Monte Carlo_, 2011. https://arxiv.org/abs/1206.1901
  [2]: M.J. Betancourt, Simon Byrne, and Mark Girolami. Optimizing The
       Integrator Step Size for Hamiltonian Monte Carlo.
       https://arxiv.org/abs/1411.6669

  """
    with tf.compat.v1.name_scope(name,
                                 'fit_with_hmc',
                                 values=[observed_time_series]) as name:
        seed = tfd.SeedStream(seed, salt='StructuralTimeSeries_fit_with_hmc')

        # Initialize state and step sizes from a variational posterior if not
        # specified.
        if initial_step_size is None or initial_state is None:

            # To avoid threading variational distributions through the training
            # while loop, we build our own copy here. `make_template` ensures
            # that our variational distributions share the optimized parameters.
            def make_variational():
                return build_factored_variational_loss(
                    model,
                    observed_time_series,
                    init_batch_shape=chain_batch_shape,
                    seed=seed())

            make_variational = tf.compat.v1.make_template(
                'make_variational', make_variational)
            _, variational_distributions = make_variational()
            minimize_op = _minimize_in_graph(
                build_loss_fn=lambda: make_variational()[0],  # return just the loss.
                num_steps=num_variational_steps,
                optimizer=variational_optimizer)

            with tf.control_dependencies([minimize_op]):
                if initial_state is None:
                    initial_state = [
                        tf.stop_gradient(d.sample())
                        for d in variational_distributions.values()
                    ]

                # Set step sizes using the unconstrained variational distribution.
                if initial_step_size is None:
                    initial_step_size = [
                        transformed_q.distribution.stddev() for transformed_q
                        in variational_distributions.values()
                    ]

        # Multiple chains manifest as an extra param batch dimension, so we need to
        # add a corresponding batch dimension to `observed_time_series`.
        observed_time_series = sts_util.pad_batch_dimension_for_multiple_chains(
            observed_time_series, model, chain_batch_shape=chain_batch_shape)

        # Run HMC to sample from the posterior on parameters.
        samples, kernel_results = mcmc.sample_chain(
            num_results=num_results,
            current_state=initial_state,
            num_burnin_steps=num_warmup_steps,
            kernel=mcmc.SimpleStepSizeAdaptation(
                inner_kernel=mcmc.TransformedTransitionKernel(
                    inner_kernel=mcmc.HamiltonianMonteCarlo(
                        target_log_prob_fn=model.joint_log_prob(
                            observed_time_series),
                        step_size=initial_step_size,
                        num_leapfrog_steps=num_leapfrog_steps,
                        state_gradients_are_stopped=True,
                        seed=seed()),
                    bijector=[param.bijector for param in model.parameters]),
                num_adaptation_steps=int(num_warmup_steps * 0.8),
                adaptation_rate=tf.convert_to_tensor(
                    value=0.1, dtype=initial_state[0].dtype)),
            parallel_iterations=1 if seed is not None else 10)

        return samples, kernel_results
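The docstring's examples end once samples are drawn. As a brief follow-on sketch (assuming the `model` and `observed_time_series` built in the docstring examples, and TF2 eager execution), the posterior draws can be passed to `tfp.sts.forecast` to form a predictive distribution over future timesteps.

```python
import tensorflow_probability as tfp

samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)

# Condition the forecast on the posterior parameter draws.
forecast_dist = tfp.sts.forecast(
    model,
    observed_time_series=observed_time_series,
    parameter_samples=samples,
    num_steps_forecast=50)

# Shapes below assume a single, unbatched series.
forecast_mean = forecast_dist.mean()[..., 0]     # [50]
forecast_scale = forecast_dist.stddev()[..., 0]  # [50]
```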
Example #4
def hmc_interleaved(model_config, target_cp, target_ncp, num_leapfrog_steps_cp,
                    num_leapfrog_steps_ncp, step_size_cp, step_size_ncp,
                    initial_states_cp):

    model_cp = model_config.model

    initial_states = list(initial_states_cp)  # Variational samples.
    shapes = [s[0].shape for s in initial_states]

    cp_step_sizes = [
        np.array(np.ones(shape=np.concatenate(
            [[FLAGS.num_chains], shapes[i]]).astype(int)) * step_size_cp[i],
                 dtype=np.float32) / np.float32(
                     (num_leapfrog_steps_cp / 4.)**2)
        for i in range(len(step_size_cp))
    ]

    ncp_step_sizes = [
        np.array(np.ones(shape=np.concatenate(
            [[FLAGS.num_chains], shapes[i]]).astype(int)) * step_size_ncp[i],
                 dtype=np.float32) / np.float32(
                     (num_leapfrog_steps_ncp / 4.)**2)
        for i in range(len(step_size_ncp))
    ]

    vectorized_target_cp = vectorize_log_joint_fn(target_cp)
    vectorized_target_ncp = vectorize_log_joint_fn(target_ncp)

    inner_kernel_cp = mcmc.SimpleStepSizeAdaptation(
        inner_kernel=mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=vectorized_target_cp,
            step_size=cp_step_sizes,
            num_leapfrog_steps=num_leapfrog_steps_cp,
            state_gradients_are_stopped=True),
        adaptation_rate=0.05,
        target_accept_prob=0.75,
        num_adaptation_steps=FLAGS.num_adaptation_steps)

    inner_kernel_ncp = mcmc.SimpleStepSizeAdaptation(
        inner_kernel=mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=vectorized_target_ncp,
            step_size=ncp_step_sizes,
            num_leapfrog_steps=num_leapfrog_steps_ncp,
            state_gradients_are_stopped=True),
        adaptation_rate=0.05,
        target_accept_prob=0.75,
        num_adaptation_steps=FLAGS.num_adaptation_steps)

    to_centered = model_config.to_centered
    to_noncentered = model_config.to_noncentered

    kernel = interleaved.Interleaved(inner_kernel_cp, inner_kernel_ncp,
                                     vectorise_transform(to_centered),
                                     vectorise_transform(to_noncentered))

    def do_sampling():
        return mcmc.sample_chain(num_results=FLAGS.num_samples,
                                 num_burnin_steps=FLAGS.num_burnin_steps,
                                 current_state=initial_states,
                                 kernel=kernel,
                                 num_steps_between_results=1)

    # Compiling the sampler speeds up inference, and suppresses errors from
    # invalid matrix decompositions (which instead become NaNs -> rejected).
    states, kernel_results = tf.xla.experimental.compile(do_sampling)

    ess = tfp.mcmc.effective_sample_size(states)

    return states, kernel_results, ess
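`interleaved.Interleaved`, `vectorise_transform`, and the CP/NCP targets are project-specific and not shown here. As background, the two parameterizations being interleaved are the standard centered (CP) and non-centered (NCP) forms of a hierarchical latent. A hypothetical toy transform pair for a single Gaussian level might look like the sketch below; it is illustrative only, not the project's actual helpers.

```python
import tensorflow as tf

# Toy one-level hierarchy: x ~ Normal(mu, sigma), with hyperparameters mu, sigma.
# CP state:  [mu, sigma, x]
# NCP state: [mu, sigma, z]  where  x = mu + sigma * z

def to_centered(state):
  mu, sigma, z = state
  return [mu, sigma, mu + sigma * z]

def to_noncentered(state):
  mu, sigma, x = state
  return [mu, sigma, (x - mu) / sigma]
```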
Example #5
def hmc_interleaved(model_config, target_cp, target_ncp, num_leapfrog_steps_cp,
                    num_leapfrog_steps_ncp, step_size_cp, step_size_ncp,
                    initial_states_cp):

    model_cp = model_config.model

    initial_states = list(initial_states_cp)  # Variational samples.
    shapes = [s[0].shape for s in initial_states]

    cp_step_sizes = [
        np.array(np.ones(shape=np.concatenate(
            [[FLAGS.num_chains], shapes[i]]).astype(int)) * step_size_cp[i],
                 dtype=np.float32) / np.float32(
                     (num_leapfrog_steps_cp / 4.)**2)
        for i in range(len(step_size_cp))
    ]

    ncp_step_sizes = [
        np.array(np.ones(shape=np.concatenate(
            [[FLAGS.num_chains], shapes[i]]).astype(int)) * step_size_ncp[i],
                 dtype=np.float32) / np.float32(
                     (num_leapfrog_steps_ncp / 4.)**2)
        for i in range(len(step_size_ncp))
    ]

    vectorized_target_cp = vectorize_log_joint_fn(target_cp)
    vectorized_target_ncp = vectorize_log_joint_fn(target_ncp)

    inner_kernel_cp = mcmc.SimpleStepSizeAdaptation(
        inner_kernel=mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=vectorized_target_cp,
            step_size=cp_step_sizes,
            num_leapfrog_steps=num_leapfrog_steps_cp),
        adaptation_rate=0.05,
        target_accept_prob=0.75,
        num_adaptation_steps=FLAGS.num_adaptation_steps)

    inner_kernel_ncp = mcmc.SimpleStepSizeAdaptation(
        inner_kernel=mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=vectorized_target_ncp,
            step_size=ncp_step_sizes,
            num_leapfrog_steps=num_leapfrog_steps_ncp),
        adaptation_rate=0.05,
        target_accept_prob=0.75,
        num_adaptation_steps=FLAGS.num_adaptation_steps)

    to_centered = model_config.to_centered
    to_noncentered = model_config.to_noncentered

    kernel = interleaved.Interleaved(inner_kernel_cp, inner_kernel_ncp,
                                     vectorise_transform(to_centered),
                                     vectorise_transform(to_noncentered))

    states, kernel_results = mcmc.sample_chain(
        num_results=FLAGS.num_samples,
        num_burnin_steps=FLAGS.num_burnin_steps,
        current_state=initial_states,
        kernel=kernel,
        num_steps_between_results=1)

    ess = tfp.mcmc.effective_sample_size(states)

    return states, kernel_results, ess
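Example #4 compiles the sampler with the older `tf.xla.experimental.compile`; in current TensorFlow the same effect is usually obtained with `tf.function(jit_compile=True)`. Below is a hedged sketch applying that to the uncompiled sampling call above; it assumes the `mcmc`, `FLAGS`, `initial_states`, and `kernel` already defined in this example.

```python
import tensorflow as tf

@tf.function(autograph=False, jit_compile=True)
def do_sampling():
  # Same call as above, but XLA-compiled; as noted in Example #4, failed matrix
  # decompositions then surface as NaNs and those proposals are simply rejected.
  return mcmc.sample_chain(
      num_results=FLAGS.num_samples,
      num_burnin_steps=FLAGS.num_burnin_steps,
      current_state=initial_states,
      kernel=kernel,
      num_steps_between_results=1)

states, kernel_results = do_sampling()
```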