Example #1
  def build(self, num_steps=None):
    """Build and return the specified kernel.

    Args:
      num_steps: An integer. Some kernel pieces (e.g. step adaptation,
        progress bars) require knowing the number of steps to sample in
        advance; pass that in here.

    Returns:
      kernel: The configured `TransitionKernel`.
    """

    if num_steps is None:
      if self.step_adapter_class or self.show_progress:
        raise ValueError(
            '`num_steps` is required for step adaptation or progress bars.')

    def build_inner(target_log_prob_fn):
      kernel = self.core_class(**self._build_core_params(target_log_prob_fn))
      if self.step_adapter_class is not None:
        assert self.core_class in CORE_KERNELS_ADAPTABLE_STEPS
        kernel = self.step_adapter_class(
            **self._build_step_adapter_params(kernel, num_steps))
      return kernel

    if self.replica_exchange_params is not None:
      kernel = replica_exchange_mc.ReplicaExchangeMC(
          target_log_prob_fn=self.target_log_prob_fn,
          make_kernel_fn=build_inner,
          **self.replica_exchange_params)
    else:
      kernel = build_inner(self.target_log_prob_fn)

    if self.transform_params is not None:
      kernel = transformed_kernel.TransformedTransitionKernel(
          **dict(self.transform_params, inner_kernel=kernel))

    if self.num_steps_between_results > 0:
      kernel = sample_discarding_kernel.SampleDiscardingKernel(
          kernel, num_steps_between_results=self.num_steps_between_results)

    reducers = self._build_reducers(size=num_steps)
    if reducers:
      kernel = with_reductions.WithReductions(
          inner_kernel=kernel, reducer=reducers)

    return kernel
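
# --- Usage sketch (illustrative, not part of the method above) ---
# A hand-assembled version of the kernel "onion" that `build` produces.
# `HamiltonianMonteCarlo` and `SimpleStepSizeAdaptation` are assumptions
# standing in for whatever `core_class` and `step_adapter_class` the builder
# instance was configured with; the replica-exchange branch is omitted.
import tensorflow_probability as tfp

tfe_mcmc = tfp.experimental.mcmc


def example_target_log_prob_fn(x):
  return -0.5 * x**2  # standard normal, for illustration only

# Core kernel plus step adaptation -- what `build_inner` assembles.
demo_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=example_target_log_prob_fn,
    step_size=0.1,
    num_leapfrog_steps=3)
demo_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=demo_kernel, num_adaptation_steps=800)

# Optional transformation and thinning layers, applied in the same order
# as the method above.
demo_kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=demo_kernel, bijector=tfp.bijectors.Identity())
demo_kernel = tfe_mcmc.SampleDiscardingKernel(
    demo_kernel, num_steps_between_results=1)

# Reductions go on the outside.
demo_kernel = tfe_mcmc.WithReductions(
    inner_kernel=demo_kernel, reducer=tfe_mcmc.TracingReducer())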
Example #2
def sample_fold(
    num_steps,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    reducer=None,
    previous_reducer_state=None,
    return_final_reducer_states=False,
    num_burnin_steps=0,
    num_steps_between_results=0,
    parallel_iterations=10,
    seed=None,
    name=None,
):
    """Computes the requested reductions over the `kernel`'s samples.

  To wit, runs the given `kernel` for `num_steps` steps, and consumes
  the stream of samples with the given `Reducer`s' `one_step` method(s).
  This runs in constant memory (unless a given `Reducer` builds a
  large structure).

  The driver internally composes the correct onion of `WithReductions`
  and `SampleDiscardingKernel` to implement the requested optionally
  thinned reduction; however, the kernel results of those applied
  Transition Kernels will not be returned. Hence, if warm-restarting
  reductions is desired, one should manually build the Transition Kernel
  onion and use `tfp.experimental.mcmc.step_kernel`.

  An arbitrary collection of `Reducer`s can be provided, and the resulting
  finalized statistic(s) will be returned in an identical structure.

  This function can sample from and reduce over multiple chains, in parallel.
  Whether or not there are multiple chains is dictated by how the `kernel`
  treats its inputs.  Typically, the shape of the independent chains is the
  shape of the result of the `target_log_prob_fn` used by the `kernel` when
  applied to the given `current_state`.

  Args:
    num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
      steps.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s).
    previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
      Warm-start for the auxiliary state needed by the given `kernel`.
      If not supplied, `sample_fold` will cold-start with
      `kernel.bootstrap_results`.
    kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
      of the Markov chain.
    reducer: A (possibly nested) structure of `Reducer`s to be evaluated
      on the `kernel`'s samples. If no reducers are given (`reducer=None`),
      then `None` will be returned in place of streaming calculations.
    previous_reducer_state: A (possibly nested) structure of running states
      corresponding to the structure in `reducer`.  For resuming streaming
      reduction computations begun in a previous run.
    return_final_reducer_states: A Python `bool` giving whether to return
      resumable final reducer states.
    num_burnin_steps: Integer or scalar `Tensor` representing the number
      of chain steps to take before starting to collect results.
      Defaults to 0 (i.e., no burn-in).
    num_steps_between_results: Integer or scalar `Tensor` representing
      the number of chain steps between collecting a result. Only one out
      of every `num_steps_between_results + 1` steps is included in the
      returned results. Defaults to 0 (i.e., no thinning).
    parallel_iterations: The number of iterations allowed to run in parallel. It
      must be a positive integer. See `tf.while_loop` for more details.
    seed: Optional seed for reproducible sampling.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'mcmc_sample_fold').

  Returns:
    reduction_results: A (possibly nested) structure of finalized reducer
      statistics. The structure identically mimics that of `reducer`.
    end_state: The final state of the Markov chain(s).
    final_kernel_results: `collections.namedtuple` of internal calculations
      used to advance the supplied `kernel`. These results do not include
      the kernel results of `WithReductions` or `SampleDiscardingKernel`.
    final_reducer_states: A (possibly nested) structure of final running reducer
      states, if `return_final_reducer_states` was `True`.  Can be used to
      resume streaming reductions when continuing sampling.
  """
    with tf.name_scope(name or 'mcmc_sample_fold'):
        num_steps = tf.convert_to_tensor(num_steps,
                                         dtype=tf.int32,
                                         name='num_steps')
        current_state = tf.nest.map_structure(
            lambda x: tf.convert_to_tensor(x, name='current_state'),
            current_state)
        reducer_was_none = False
        if reducer is None:
            reducer = []
            reducer_was_none = True
        thinning_kernel = sample_discarding_kernel.SampleDiscardingKernel(
            inner_kernel=kernel,
            num_burnin_steps=num_burnin_steps,
            num_steps_between_results=num_steps_between_results)
        reduction_kernel = with_reductions.WithReductions(
            inner_kernel=thinning_kernel,
            reducer=reducer,
        )
        if previous_kernel_results is None:
            previous_kernel_results = kernel.bootstrap_results(current_state)
        thinning_pkr = thinning_kernel.bootstrap_results(
            current_state, previous_kernel_results)
        reduction_pkr = reduction_kernel.bootstrap_results(
            current_state, thinning_pkr, previous_reducer_state)

        end_state, final_kernel_results = exp_sample_lib.step_kernel(
            num_steps=num_steps,
            current_state=current_state,
            previous_kernel_results=reduction_pkr,
            kernel=reduction_kernel,
            return_final_kernel_results=True,
            parallel_iterations=parallel_iterations,
            seed=seed,
            name=name,
        )
        reduction_results = nest.map_structure_up_to(
            reducer,
            lambda r, s: r.finalize(s),
            reducer,
            final_kernel_results.streaming_calculations,
            check_types=False)
        if reducer_was_none:
            reduction_results = None
        # TODO(axch): Choose a friendly return value convention that
        # - Doesn't burden the user with needless stuff when they don't want it
        # - Supports warm restart when the user does want it
        # - Doesn't trigger Pylint's unbalanced-tuple-unpacking warning.
        if return_final_reducer_states:
            return (reduction_results, end_state,
                    final_kernel_results.inner_results.inner_results,
                    final_kernel_results.streaming_calculations)
        else:
            return (reduction_results, end_state,
                    final_kernel_results.inner_results.inner_results)
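
# --- Usage sketch (illustrative, not part of the function above) ---
# Shows the warm-restart path that `previous_kernel_results`,
# `previous_reducer_state`, and `return_final_reducer_states` enable.
# `RandomWalkMetropolis` and `ExpectationsReducer` are assumed example
# choices; any TransitionKernel / Reducer pair would do.
import tensorflow as tf
import tensorflow_probability as tfp

tfe_mcmc = tfp.experimental.mcmc

rwm_kernel = tfp.mcmc.RandomWalkMetropolis(lambda x: -0.5 * x**2)
mean_reducer = tfe_mcmc.ExpectationsReducer()  # streaming posterior mean

# First leg: run 500 steps and keep the resumable reducer state.
mean_so_far, state, kr, reducer_state = sample_fold(
    num_steps=500,
    current_state=tf.zeros([]),
    kernel=rwm_kernel,
    reducer=mean_reducer,
    return_final_reducer_states=True,
    seed=17)

# Second leg: warm-restart both the kernel and the streaming reduction.
mean_total, _, _ = sample_fold(
    num_steps=500,
    current_state=state,
    previous_kernel_results=kr,
    kernel=rwm_kernel,
    reducer=mean_reducer,
    previous_reducer_state=reducer_state,
    seed=18)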
Example #3
def sample_fold(
    num_steps,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    reducer=None,
    num_burnin_steps=0,
    num_steps_between_results=0,
    parallel_iterations=10,
    seed=None,
    name=None,
):
    """Computes the requested reductions over the `kernel`'s samples.

  To wit, runs the given `kernel` for `num_steps` steps, and consumes
  the stream of samples with the given `Reducer`s' `one_step` method(s).
  This runs in constant memory (unless a given `Reducer` builds a
  large structure).

  The driver internally composes the correct onion of `WithReductions`
  and `SampleDiscardingKernel` to implement the requested optionally
  thinned reduction; however, the kernel results of those applied
  Transition Kernels will not be returned. Hence, if warm-restarting
  reductions is desired, one should manually build the Transition Kernel
  onion and use `tfp.experimental.mcmc.step_kernel`.

  An arbitrary collection of `Reducer`s can be provided, and the resulting
  finalized statistic(s) will be returned in an identical structure.

  Args:
    num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
      steps.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s).
    previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
      Warm-start for the auxiliary state needed by the given `kernel`.
      If not supplied, `sample_fold` will cold-start with
      `kernel.bootstrap_results`.
    kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
      of the Markov chain.
    reducer: A (possibly nested) structure of `Reducer`s to be evaluated
      on the `kernel`'s samples. If no reducers are given (`reducer=None`),
      then `None` will be returned in place of streaming calculations.
    num_burnin_steps: Integer or scalar `Tensor` representing the number
      of chain steps to take before starting to collect results.
      Defaults to 0 (i.e., no burn-in).
    num_steps_between_results: Integer or scalar `Tensor` representing
      the number of chain steps between collecting a result. Only one out
      of every `num_steps_between_results + 1` steps is included in the
      returned results. Defaults to 0 (i.e., no thinning).
    parallel_iterations: The number of iterations allowed to run in parallel. It
      must be a positive integer. See `tf.while_loop` for more details.
    seed: Optional seed for reproducible sampling.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'mcmc_sample_fold').

  Returns:
    reduction_results: A (possibly nested) structure of finalized reducer
      statistics. The structure identically mimics that of `reducer`.
    end_state: The final state of the Markov chain(s).
    final_kernel_results: `collections.namedtuple` of internal calculations
      used to advance the supplied `kernel`. These results do not include
      the kernel results of `WithReductions` or `SampleDiscardingKernel`.
  """
    with tf.name_scope(name or 'mcmc_sample_fold'):
        num_steps = tf.convert_to_tensor(num_steps,
                                         dtype=tf.int32,
                                         name='num_steps')
        current_state = tf.nest.map_structure(
            lambda x: tf.convert_to_tensor(x, name='current_state'),
            current_state)
        reducer_was_none = False
        if reducer is None:
            reducer = []
            reducer_was_none = True
        reduction_kernel = with_reductions.WithReductions(
            inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(
                inner_kernel=kernel,
                num_burnin_steps=num_burnin_steps,
                num_steps_between_results=num_steps_between_results),
            reducer=reducer,
        )
        end_state, final_kernel_results = sample.step_kernel(
            num_steps=num_steps,
            current_state=current_state,
            previous_kernel_results=previous_kernel_results,
            kernel=reduction_kernel,
            return_final_kernel_results=True,
            parallel_iterations=parallel_iterations,
            seed=seed,
            name=name,
        )
        reduction_results = nest.map_structure_up_to(
            reducer,
            lambda r, s: r.finalize(s),
            reducer,
            final_kernel_results.streaming_calculations,
            check_types=False)
        if reducer_was_none:
            reduction_results = None
        return (reduction_results, end_state,
                final_kernel_results.inner_results.inner_results)
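
# --- Usage sketch (illustrative, not part of the function above) ---
# Single-shot call of this variant: burn in, thin, and stream a posterior
# mean in constant memory.  `RandomWalkMetropolis` and `ExpectationsReducer`
# are assumed example choices, not required by the function.
import tensorflow as tf
import tensorflow_probability as tfp

tfe_mcmc = tfp.experimental.mcmc

posterior_mean, final_state, final_kernel_results = sample_fold(
    num_steps=1000,
    current_state=tf.zeros([]),
    kernel=tfp.mcmc.RandomWalkMetropolis(lambda x: -0.5 * x**2),
    reducer=tfe_mcmc.ExpectationsReducer(),
    num_burnin_steps=200,
    num_steps_between_results=1,  # keep every other post-burn-in step
    seed=42)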