Example #1
def test_atypical_nesting(self):
    results = FakeAtypicalNestingResults(
        unique_atypical_nesting=0, atypical_inner_results=FakeResults(1))
    self.assertTrue(unnest.has_nested(results, 'unique_atypical_nesting'))
    self.assertTrue(unnest.has_nested(results, 'unique_core'))
    self.assertFalse(hasattr(results, 'unique_core'))
    self.assertFalse(unnest.has_nested(results, 'foo'))
    self.assertEqual(
        results.unique_atypical_nesting,
        unnest.get_innermost(results, 'unique_atypical_nesting'))
    self.assertEqual(
        results.unique_atypical_nesting,
        unnest.get_outermost(results, 'unique_atypical_nesting'))
    self.assertEqual(results.atypical_inner_results.unique_core,
                     unnest.get_innermost(results, 'unique_core'))
    self.assertEqual(results.atypical_inner_results.unique_core,
                     unnest.get_outermost(results, 'unique_core'))
    self.assertRaises(AttributeError,
                      lambda: unnest.get_innermost(results, 'foo'))
    self.assertRaises(AttributeError,
                      lambda: unnest.get_outermost(results, 'foo'))
    self.assertIs(unnest.get_innermost(results, 'foo', SINGLETON),
                  SINGLETON)
    self.assertIs(unnest.get_outermost(results, 'foo', SINGLETON),
                  SINGLETON)
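
This test leans on fixtures defined elsewhere in the test module. Below is a minimal sketch of what they might look like, inferred purely from how the test constructs and queries them; the real fixtures may carry additional fields, and the mechanism by which `unnest` recognizes `atypical_inner_results` as a nesting attribute is library-internal and not shown here.

import collections

# Hypothetical reconstructions of the test fixtures; the field names are
# inferred from the assertions above.
FakeResults = collections.namedtuple('FakeResults', ['unique_core'])
FakeAtypicalNestingResults = collections.namedtuple(
    'FakeAtypicalNestingResults',
    ['unique_atypical_nesting', 'atypical_inner_results'])

# Unique sentinel passed as the `default` argument; `assertIs` then verifies
# that exactly this object comes back when the attribute is absent.
SINGLETON = object()
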
Example #2
    def _process_results(self):
        """Process outputs to extract useful data."""
        if unnest.has_nested(self.kernel, 'reducer'):
            reducers = unnest.get_outermost(self.kernel, 'reducer')
            # Finalize streaming calculations.
            self.reductions = nest.map_structure_up_to(
                reducers,
                lambda r, s: r.finalize(s),
                reducers,
                unnest.get_outermost(self.results, 'reduction_results'),
                check_types=False)

            # Grab useful reductions.
            def process_reductions(reducer, reduction):
                if isinstance(reducer, tracing_reducer.TracingReducer):
                    self.all_states, self.trace = reduction

            nest.map_structure_up_to(reducers,
                                     process_reductions,
                                     reducers,
                                     self.reductions,
                                     check_types=False)

        if unnest.has_nested(self.results, 'new_step_size'):
            self.new_step_size = unnest.get_outermost(self.results,
                                                      'new_step_size')
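
The `nest.map_structure_up_to` calls above map only down to the depth of the `reducers` structure, so each reducer is paired with its (possibly structured) reduction results as a whole. A toy illustration of that behavior using `tf.nest`; the names and values here are made up:

import tensorflow as tf

# `map_structure_up_to` flattens the remaining arguments only as deep as the
# first (shallow) structure, so the tuple under 'trace' stays intact.
reducers = {'trace': 'tracing_reducer'}
reductions = {'trace': ([1, 2], [3, 4])}  # e.g. an (all_states, trace) pair
paired = tf.nest.map_structure_up_to(
    reducers, lambda r, s: (r, s), reducers, reductions)
# paired == {'trace': ('tracing_reducer', ([1, 2], [3, 4]))}
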
Example #3
def _slow_window(*,
                 kind,
                 proposal_kernel_kwargs,
                 dual_averaging_kwargs,
                 num_draws,
                 initial_position,
                 initial_running_variance,
                 bijector,
                 trace_fn,
                 seed):
  """Sample using both step size and mass matrix adaptation."""
  dual_averaging_kwargs = dict(dual_averaging_kwargs)
  dual_averaging_kwargs.setdefault('num_adaptation_steps', num_draws)
  kernel = make_slow_adapt_kernel(
      kind=kind,
      proposal_kernel_kwargs=proposal_kernel_kwargs,
      dual_averaging_kwargs=dual_averaging_kwargs,
      initial_running_variance=initial_running_variance)
  with warnings.catch_warnings():
    warnings.simplefilter('ignore')

    draws, trace, fkr = sample.sample_chain(
        num_draws,
        initial_position,
        kernel=kernel,
        return_final_kernel_results=True,
        # pylint: disable=g-long-lambda
        trace_fn=lambda state, pkr: trace_fn(state,
                                             bijector,
                                             tf.constant(True),
                                             pkr.inner_results.inner_results),
        seed=seed)
    # pylint: enable=g-long-lambda

  draw_and_chain_axes = [0, 1]
  weighted_running_variance = []
  for d in list(draws):
    prev_mean, prev_var = tf.nn.moments(d[-num_draws // 2:],
                                        axes=draw_and_chain_axes)
    num_samples = tf.cast(
        num_draws / 2,
        dtype=dtype_util.common_dtype([prev_mean, prev_var], tf.float32))
    weighted_running_variance.append(sample_stats.RunningVariance.from_stats(
        num_samples=num_samples,
        mean=prev_mean,
        variance=prev_var))

  step_size = unnest.get_outermost(fkr, 'step_size')
  momentum_distribution = unnest.get_outermost(fkr, 'momentum_distribution')

  return (draws, trace, step_size, weighted_running_variance,
          momentum_distribution)
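
The moment computation above keeps only the second half of each draw tensor and reduces over both the draw and chain axes, leaving one mean and variance per event dimension. A toy shape check, with the sizes chosen purely for illustration:

import tensorflow as tf

num_draws = 8
# Draws shaped [num_draws, num_chains, event_size].
d = tf.random.normal([num_draws, 4, 3])
mean, var = tf.nn.moments(d[-num_draws // 2:], axes=[0, 1])
print(mean.shape, var.shape)  # (3,) (3,)
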
Example #4
def test_flat(self):
    results = FakeResults(0)
    self.assertTrue(unnest.has_nested(results, 'unique_core'))
    self.assertFalse(unnest.has_nested(results, 'foo'))
    self.assertEqual(results.unique_core,
                     unnest.get_innermost(results, 'unique_core'))
    self.assertEqual(results.unique_core,
                     unnest.get_outermost(results, 'unique_core'))
    self.assertRaises(
        AttributeError, lambda: unnest.get_innermost(results, 'foo'))
    self.assertRaises(
        AttributeError, lambda: unnest.get_outermost(results, 'foo'))
    self.assertIs(unnest.get_innermost(results, 'foo', SINGLETON), SINGLETON)
    self.assertIs(unnest.get_outermost(results, 'foo', SINGLETON), SINGLETON)
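
What makes `get_innermost`/`get_outermost` work in these tests is that `unnest` walks chains of `inner_results`-style attributes on kernel-results-like objects. A minimal standalone illustration with toy namedtuple result types (the types are made up; only the `inner_results` field name matters):

import collections

from tensorflow_probability.python.internal import unnest

Inner = collections.namedtuple('Inner', ['step_size'])
Outer = collections.namedtuple('Outer', ['inner_results'])

fkr = Outer(inner_results=Inner(step_size=0.1))
# `Outer` has no `step_size` field, so unnest descends into `inner_results`.
assert unnest.get_outermost(fkr, 'step_size') == 0.1
assert unnest.get_innermost(fkr, 'step_size') == 0.1
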
Example #5
def realized_acceptance_rate(self):
    """Return the realized acceptance rate of the samples."""
    try:
        is_accepted = unnest.get_outermost(self.trace, 'is_accepted')
    except AttributeError:
        return None
    return tf.math.reduce_mean(tf.cast(is_accepted, tf.float32), axis=0)
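
The final reduction averages the boolean acceptance trace over the draw axis, yielding one rate per chain. A toy run, assuming a `[num_draws, num_chains]` trace:

import tensorflow as tf

is_accepted = tf.constant([[True, False],
                           [True, True],
                           [False, True]])
rate = tf.math.reduce_mean(tf.cast(is_accepted, tf.float32), axis=0)
# rate ~= [0.667, 0.667], one realized acceptance rate per chain.
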
Example #6
def _fast_window(*, kind, proposal_kernel_kwargs, dual_averaging_kwargs,
                 num_draws, initial_position, bijector, trace_fn, seed):
    """Sample using just step size adaptation."""
    dual_averaging_kwargs.update({'num_adaptation_steps': num_draws})
    kernel = make_fast_adapt_kernel(
        kind=kind,
        proposal_kernel_kwargs=proposal_kernel_kwargs,
        dual_averaging_kwargs=dual_averaging_kwargs)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        draws, trace, fkr = sample.sample_chain(
            num_draws,
            initial_position,
            kernel=kernel,
            return_final_kernel_results=True,
            # pylint: disable=g-long-lambda
            trace_fn=lambda state, pkr: trace_fn(
                state, bijector, tf.constant(True), pkr.inner_results),
            seed=seed)
        # pylint: enable=g-long-lambda

    draw_and_chain_axes = [0, 1]
    prev_mean, prev_var = tf.nn.moments(draws[-num_draws // 2:],
                                        axes=draw_and_chain_axes)

    num_samples = tf.cast(num_draws / 2,
                          dtype=dtype_util.common_dtype([prev_mean, prev_var],
                                                        tf.float32))
    weighted_running_variance = sample_stats.RunningVariance.from_stats(
        num_samples=num_samples, mean=prev_mean, variance=prev_var)

    step_size = unnest.get_outermost(fkr, 'step_size')
    return draws, trace, step_size, weighted_running_variance
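
`RunningVariance.from_stats` seeds an accumulator with precomputed moments so that a later window can keep folding in samples. A short sketch against `tfp.experimental.stats`; the numbers are made up:

import tensorflow as tf
import tensorflow_probability as tfp

rv = tfp.experimental.stats.RunningVariance.from_stats(
    num_samples=tf.constant(4.),
    mean=tf.constant([0.1, -0.2]),
    variance=tf.constant([1.0, 0.5]))
rv = rv.update(tf.constant([0.3, 0.1]))  # Fold in one more sample.
print(rv.mean, rv.variance())
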
Example #7
def _fast_adapt_window(
    num_draws,
    joint_log_prob_fn,
    initial_position,
    hmc_kernel_kwargs,
    dual_averaging_kwargs,
    event_kernel_kwargs,
    trace_fn=None,
    seed=None,
):
    """
    In the fast adaptation window, we use the
    `DualAveragingStepSizeAdaptation` kernel
    to wrap an HMC kernel.

    :param num_draws: Number of MCMC draws in window
    :param joint_log_prob_fn: joint log posterior function
    :param initial_position: initial state of the Markov chain
    :param hmc_kernel_kwargs: `HamiltonianMonteCarlo` kernel keyword args
    :param dual_averaging_kwargs: `DualAveragingStepSizeAdaptation` keyword args
    :param event_kernel_kwargs: EventTimesMH and Occult kernel args
    :param trace_fn: function to trace kernel results
    :param seed: optional random seed.
    :returns: draws, kernel results, the adapted HMC step size, and variance
              accumulator
    """
    kernel_list = [
        (
            0,
            make_hmc_fast_adapt_kernel(
                hmc_kernel_kwargs=hmc_kernel_kwargs,
                dual_averaging_kwargs=dual_averaging_kwargs,
            ),
        ),
        (1, make_event_multiscan_gibbs_step(**event_kernel_kwargs)),
    ]

    kernel = GibbsKernel(
        target_log_prob_fn=joint_log_prob_fn,
        kernel_list=kernel_list,
        name="fast_adapt",
    )

    draws, trace, fkr = tfp.mcmc.sample_chain(
        num_draws,
        initial_position,
        kernel=kernel,
        return_final_kernel_results=True,
        trace_fn=trace_fn,
        seed=seed,
    )

    weighted_running_variance = get_weighted_running_variance(draws[0])
    step_size = unnest.get_outermost(fkr.inner_results[0], "step_size")
    return draws, trace, step_size, weighted_running_variance
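
`get_weighted_running_variance` is defined elsewhere in this codebase. A plausible sketch, assuming it mirrors the second-half moment pattern used by the `_fast_window` example above; the actual implementation may differ:

import tensorflow as tf
import tensorflow_probability as tfp

def get_weighted_running_variance(draws):
    """Hypothetical sketch: seed a RunningVariance from the moments of the
    second half of the draws. Assumes a statically known draw count."""
    num_draws = tf.compat.dimension_value(draws.shape[0])
    prev_mean, prev_var = tf.nn.moments(draws[num_draws // 2:], axes=[0])
    num_samples = tf.cast(num_draws // 2, dtype=prev_mean.dtype)
    return tfp.experimental.stats.RunningVariance.from_stats(
        num_samples=num_samples, mean=prev_mean, variance=prev_var)
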
Example #8
def test_deeply_nested(self):
    results = _build_deeply_nested(0, 1, 2, 3, 4)
    self.assertTrue(unnest.has_nested(results, 'unique_nesting'))
    self.assertTrue(unnest.has_nested(results, 'unique_atypical_nesting'))
    self.assertTrue(unnest.has_nested(results, 'unique_core'))
    self.assertFalse(hasattr(results, 'unique_core'))
    self.assertFalse(unnest.has_nested(results, 'foo'))
    self.assertEqual(unnest.get_innermost(results, 'unique_nesting'), 3)
    self.assertEqual(unnest.get_outermost(results, 'unique_nesting'), 0)
    self.assertEqual(
        unnest.get_innermost(results, 'unique_atypical_nesting'), 2)
    self.assertEqual(
        unnest.get_outermost(results, 'unique_atypical_nesting'), 1)
    self.assertEqual(unnest.get_innermost(results, 'unique_core'), 4)
    self.assertEqual(unnest.get_outermost(results, 'unique_core'), 4)
    self.assertRaises(
        AttributeError, lambda: unnest.get_innermost(results, 'foo'))
    self.assertRaises(
        AttributeError, lambda: unnest.get_outermost(results, 'foo'))
    self.assertIs(unnest.get_innermost(results, 'foo', SINGLETON), SINGLETON)
    self.assertIs(unnest.get_outermost(results, 'foo', SINGLETON), SINGLETON)
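
`_build_deeply_nested` is another fixture defined outside this snippet. A hypothetical reconstruction consistent with the assertions above, reusing the `FakeResults` and `FakeAtypicalNestingResults` sketches from Example #1 plus a conventional `FakeNestingResults` wrapper:

import collections

FakeNestingResults = collections.namedtuple(
    'FakeNestingResults', ['unique_nesting', 'inner_results'])

def _build_deeply_nested(a, b, c, d, e):
    # Outermost to innermost: conventional nesting, two levels of atypical
    # nesting, conventional nesting again, then the core results.
    return FakeNestingResults(
        unique_nesting=a,
        inner_results=FakeAtypicalNestingResults(
            unique_atypical_nesting=b,
            atypical_inner_results=FakeAtypicalNestingResults(
                unique_atypical_nesting=c,
                atypical_inner_results=FakeNestingResults(
                    unique_nesting=d,
                    inner_results=FakeResults(e)))))
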
Example #9
def _slow_window(*, target_log_prob_fn, num_leapfrog_steps, num_draws,
                 initial_position, initial_running_variance, initial_step_size,
                 target_accept_prob, bijector, trace_fn, seed):
    """Sample using both step size and mass matrix adaptation."""
    kernel = make_slow_adapt_kernel(
        target_log_prob_fn=target_log_prob_fn,
        initial_running_variance=initial_running_variance,
        initial_step_size=initial_step_size,
        num_leapfrog_steps=num_leapfrog_steps,
        num_adaptation_steps=num_draws,
        target_accept_prob=target_accept_prob)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')

        draws, trace, fkr = sample.sample_chain(
            num_draws,
            initial_position,
            kernel=kernel,
            return_final_kernel_results=True,
            # pylint: disable=g-long-lambda
            trace_fn=lambda state, pkr: trace_fn(
                state, bijector, tf.constant(True), pkr.inner_results.
                inner_results),
            seed=seed)
        # pylint: enable=g-long-lambda

    draw_and_chain_axes = [0, 1]
    prev_mean, prev_var = tf.nn.moments(draws[-num_draws // 2:],
                                        axes=draw_and_chain_axes)
    num_samples = tf.cast(num_draws / 2,
                          dtype=dtype_util.common_dtype([prev_mean, prev_var],
                                                        tf.float32))
    weighted_running_variance = sample_stats.RunningVariance.from_stats(
        num_samples=num_samples, mean=prev_mean, variance=prev_var)

    step_size = unnest.get_outermost(fkr, 'step_size')
    momentum_distribution = unnest.get_outermost(fkr, 'momentum_distribution')

    return (draws, trace, step_size, weighted_running_variance,
            momentum_distribution)
Example #10
def trace_results_fn(_, results):
    """Packs results into a dictionary"""
    results_dict = {}
    root_results = results.inner_results

    step_size = tf.convert_to_tensor(
        unnest.get_outermost(root_results[0], "step_size")
    )

    results_dict["hmc"] = {
        "is_accepted": unnest.get_innermost(root_results[0], "is_accepted"),
        "target_log_prob": unnest.get_innermost(
            root_results[0], "target_log_prob"
        ),
        "step_size": step_size,
    }

    def get_move_results(results):
        return {
            "is_accepted": results.is_accepted,
            "target_log_prob": results.accepted_results.target_log_prob,
            "proposed_delta": tf.stack(
                [
                    results.accepted_results.m,
                    results.accepted_results.t,
                    results.accepted_results.delta_t,
                    results.accepted_results.x_star,
                ]
            ),
        }

    res1 = root_results[1].inner_results
    results_dict["move/S->E"] = get_move_results(res1[0])
    results_dict["move/E->I"] = get_move_results(res1[1])
    results_dict["occult/S->E"] = get_move_results(res1[2])
    results_dict["occult/E->I"] = get_move_results(res1[3])

    return results_dict
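
The `get_innermost(root_results[0], "target_log_prob")` lookup above works because `unnest` also descends through `accepted_results`-style fields, where Metropolis-Hastings-type kernels keep the log density of the accepted state. A toy illustration with made-up result types:

import collections

from tensorflow_probability.python.internal import unnest

Accepted = collections.namedtuple('Accepted', ['target_log_prob'])
MHResults = collections.namedtuple(
    'MHResults', ['is_accepted', 'accepted_results'])

kr = MHResults(is_accepted=True,
               accepted_results=Accepted(target_log_prob=-3.2))
assert unnest.get_innermost(kr, 'target_log_prob') == -3.2
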
Example #11
def _slow_adapt_window(
    num_draws,
    joint_log_prob_fn,
    initial_position,
    initial_running_variance,
    hmc_kernel_kwargs,
    dual_averaging_kwargs,
    event_kernel_kwargs,
    trace_fn=None,
    seed=None,
):
    """In the slow adaptation phase, we adapt the HMC
    step size and mass matrix together.

    :param num_draws: number of MCMC iterations
    :param joint_log_prob_fn: the joint posterior density function
    :param initial_position: initial Markov chain state
    :param initial_running_variance: initial variance accumulator
    :param hmc_kernel_kwargs: `HamiltonianMonteCarlo` kernel kwargs
    :param dual_averaging_kwargs: `DualAveragingStepSizeAdaptation` kwargs
    :param event_kernel_kwargs: EventTimesMH and Occults kwargs
    :param trace_fn: result trace function
    :param seed: optional random seed
    :returns: draws, kernel results, adapted step size, the variance accumulator,
              and "learned" momentum distribution for the HMC.
    """
    kernel_list = [
        (
            0,
            make_hmc_slow_adapt_kernel(
                initial_running_variance,
                hmc_kernel_kwargs,
                dual_averaging_kwargs,
            ),
        ),
        (1, make_event_multiscan_gibbs_step(**event_kernel_kwargs)),
    ]

    kernel = GibbsKernel(
        target_log_prob_fn=joint_log_prob_fn,
        kernel_list=kernel_list,
        name="slow_adapt",
    )

    draws, trace, fkr = tfp.mcmc.sample_chain(
        num_draws,
        current_state=initial_position,
        kernel=kernel,
        return_final_kernel_results=True,
        trace_fn=trace_fn,
        seed=seed,
    )

    step_size = unnest.get_outermost(fkr.inner_results[0], "step_size")
    momentum_distribution = unnest.get_outermost(
        fkr.inner_results[0], "momentum_distribution"
    )

    weighted_running_variance = get_weighted_running_variance(draws[0])

    return (
        draws,
        trace,
        step_size,
        weighted_running_variance,
        momentum_distribution,
    )