Example No. 1
0
def gaussian_processes(draw,
                       kernel_name=None,
                       batch_shape=None,
                       event_dim=None,
                       feature_dim=None,
                       feature_ndims=None,
                       enable_vars=False):
    """Strategy for drawing `GaussianProcess` distributions.

    Args:
      draw: Hypothesis strategy sampler supplied by `@hps.composite`.
      kernel_name: Optional Python string selecting the base kernel; drawn by
        Hypothesis if omitted.
      batch_shape: Optional `TensorShape` for the kernel's batch shape.
      event_dim: Optional Python int giving the size of the parameters' event
        dimensions.
      feature_dim: Optional Python int giving the size of each feature
        dimension.
      feature_ndims: Optional Python int giving the number of feature
        dimensions.
      enable_vars: Python bool; when `True`, index points and distribution
        parameters may be drawn as Variables / deferred tensors.

    Returns:
      A `tfd.GaussianProcess` distribution.
    """
    # Draw the underlying kernel first.  Variables are deliberately disabled
    # for the kernel itself; only index points / params may become Variables.
    kernel, _ = draw(
        kernel_hps.base_kernels(
            kernel_name=kernel_name,
            batch_shape=batch_shape,
            event_dim=event_dim,
            feature_dim=feature_dim,
            feature_ndims=feature_ndims,
            enable_vars=False))
    # Choose a batch shape that broadcasts against the kernel's batch shape.
    gp_batch_shape = draw(
        tfp_hps.broadcast_compatible_shape(kernel.batch_shape))
    idx_points = draw(
        kernel_hps.kernel_input(batch_shape=gp_batch_shape,
                                example_ndims=1,
                                feature_dim=feature_dim,
                                feature_ndims=feature_ndims,
                                enable_vars=enable_vars,
                                name='index_points'))
    gp_params = draw(
        broadcasting_params('GaussianProcess',
                            gp_batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))

    # `retrying_cholesky` returns a tuple; only its first element (the
    # Cholesky factor) is used.
    return tfd.GaussianProcess(
        kernel=kernel,
        index_points=idx_points,
        cholesky_fn=lambda m: marginal_fns.retrying_cholesky(m)[0],
        observation_noise_variance=gp_params['observation_noise_variance'])
Example No. 2
0
def schur_complements(draw,
                      batch_shape=None,
                      event_dim=None,
                      feature_dim=None,
                      feature_ndims=None,
                      enable_vars=None,
                      depth=None):
    """Strategy for drawing `SchurComplement` kernels.

    The underlying kernel is drawn from the `kernels` strategy.

    Args:
      draw: Hypothesis strategy sampler supplied by `@hps.composite`.
      batch_shape: An optional `TensorShape`.  The batch shape of the
        resulting Kernel.  Hypothesis will pick a batch shape if omitted.
      event_dim: Optional Python int giving the size of each of the kernel's
        parameters' event dimensions.  This is shared across all parameters,
        permitting square event matrices, compatible location and scale
        Tensors, etc. If omitted, Hypothesis will choose one.
      feature_dim: Optional Python int giving the size of each feature
        dimension.  If omitted, Hypothesis will choose one.
      feature_ndims: Optional Python int stating the number of feature
        dimensions inputs will have. If omitted, Hypothesis will choose one.
      enable_vars: TODO(bjp): Make this `True` all the time and put variable
        initialization in slicing_test.  If `False`, the returned parameters
        are all Tensors, never Variables or DeferredTensor.
      depth: Python `int` giving maximum nesting depth of compound kernel.

    Returns:
      kernels: A strategy for drawing `SchurComplement` kernels with the
        specified `batch_shape` (or an arbitrary one if omitted).
    """
    # Fill in any hyperparameters the caller left unspecified.  The draw
    # order here must stay fixed so Hypothesis shrinking remains stable.
    if depth is None:
        depth = draw(depths())
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    # The wrapped kernel never carries Variables itself; any Variables are
    # introduced below for the SchurComplement-specific parameters.
    base_kernel, kernel_variable_names = draw(
        kernels(batch_shape=batch_shape,
                event_dim=event_dim,
                feature_dim=feature_dim,
                feature_ndims=feature_ndims,
                enable_vars=False,
                depth=depth - 1))

    # SchurComplement requires the inputs to have one example dimension.
    fixed_inputs = draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=1,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims))
    # Positive shift to ensure the divisor matrix is PD.
    diag_shift = np.float64(
        draw(
            hpnp.arrays(dtype=np.float64,
                        shape=tensorshape_util.as_list(batch_shape),
                        elements=hps.floats(1,
                                            100,
                                            allow_nan=False,
                                            allow_infinity=False))))

    hp.note('Forming SchurComplement kernel with fixed_inputs: {} '
            'and diag_shift: {}'.format(fixed_inputs, diag_shift))

    schur_complement_params = {
        'fixed_inputs': fixed_inputs,
        'diag_shift': diag_shift
    }

    # Optionally promote each parameter to a Variable, and sometimes further
    # wrap it so its usages can be counted.  The tuple below matches the
    # dict's insertion order, keeping the draw sequence identical.
    for name in ('fixed_inputs', 'diag_shift'):
        if enable_vars and draw(hps.booleans()):
            kernel_variable_names.append(name)
            as_var = tf.Variable(schur_complement_params[name], name=name)
            schur_complement_params[name] = as_var
            if draw(hps.booleans()):
                schur_complement_params[name] = tfp_hps.defer_and_count_usage(
                    as_var)
    # `retrying_cholesky` returns a tuple; only the factor (element 0) is
    # passed on as the Cholesky.
    result_kernel = tfpk.SchurComplement(
        base_kernel=base_kernel,
        fixed_inputs=schur_complement_params['fixed_inputs'],
        diag_shift=schur_complement_params['diag_shift'],
        cholesky_fn=lambda m: marginal_fns.retrying_cholesky(m)[0],
        validate_args=True)
    return result_kernel, kernel_variable_names
Example No. 3
0
def student_t_process_regression_models(draw,
                                        kernel_name=None,
                                        batch_shape=None,
                                        event_dim=None,
                                        feature_dim=None,
                                        feature_ndims=None,
                                        enable_vars=False):
  """Strategy for drawing `StudentTProcessRegressionModel` distributions.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    kernel_name: Optional Python string selecting the base kernel; drawn by
      Hypothesis if omitted.
    batch_shape: Optional `TensorShape` for the kernel's batch shape.
    event_dim: Optional Python int for the parameters' event dimensions.
    feature_dim: Optional Python int for each feature dimension size.
    feature_ndims: Optional Python int for the number of feature dimensions.
    enable_vars: Python bool; when `True`, inputs and parameters may be drawn
      as Variables / deferred tensors.

  Returns:
    A `tfd.StudentTProcessRegressionModel` distribution.
  """
  # Draw the kernel first, with Variables disabled for the kernel itself.
  kernel, _ = draw(kernel_hps.base_kernels(
      kernel_name=kernel_name,
      batch_shape=batch_shape,
      event_dim=event_dim,
      feature_dim=feature_dim,
      feature_ndims=feature_ndims,
      # Disable variables
      enable_vars=False))
  compatible_batch_shape = draw(
      tfp_hps.broadcast_compatible_shape(kernel.batch_shape))

  def _draw_points(name):
    # Both index-point inputs share everything except their name.
    return draw(kernel_hps.kernel_input(
        batch_shape=compatible_batch_shape,
        example_ndims=1,
        feature_dim=feature_dim,
        feature_ndims=feature_ndims,
        enable_vars=enable_vars,
        name=name))

  index_points = _draw_points('index_points')
  hp.note('Index points:\n{}'.format(repr(index_points)))

  observation_index_points = _draw_points('observation_index_points')
  hp.note('Observation index points:\n{}'.format(
      repr(observation_index_points)))

  # Observations are scalar-featured and must match the example count of
  # `observation_index_points`.
  num_obs = int(observation_index_points.shape[-(feature_ndims + 1)])
  observations = draw(kernel_hps.kernel_input(
      batch_shape=compatible_batch_shape,
      example_ndims=1,
      example_dim=num_obs,
      # No feature dimensions.
      feature_dim=0,
      feature_ndims=0,
      enable_vars=enable_vars,
      name='observations'))
  hp.note('Observations:\n{}'.format(repr(observations)))

  params = draw(broadcasting_params(
      'StudentTProcessRegressionModel',
      compatible_batch_shape,
      event_dim=event_dim,
      enable_vars=enable_vars))
  hp.note('Params:\n{}'.format(repr(params)))

  return tfd.StudentTProcessRegressionModel(
      # Ensure that the `df` parameter is not a `Variable` since we pass
      # in a `DeferredTensor` of the `df` parameter.
      df=tf.convert_to_tensor(params['df']),
      kernel=kernel,
      index_points=index_points,
      observation_index_points=observation_index_points,
      observations=observations,
      cholesky_fn=lambda m: marginal_fns.retrying_cholesky(m)[0],
      observation_noise_variance=params['observation_noise_variance'])
Example No. 4
0
def gaussian_process_regression_models(draw,
                                       kernel_name=None,
                                       batch_shape=None,
                                       event_dim=None,
                                       feature_dim=None,
                                       feature_ndims=None,
                                       enable_vars=False):
  """Strategy for drawing `GaussianProcessRegressionModel` distributions.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    kernel_name: Optional Python string selecting the base kernel; drawn by
      Hypothesis if omitted.
    batch_shape: Optional `TensorShape` for the kernel's batch shape.
    event_dim: Optional Python int for the parameters' event dimensions.
    feature_dim: Optional Python int for each feature dimension size.
    feature_ndims: Optional Python int for the number of feature dimensions.
    enable_vars: Python bool; when `True`, inputs and parameters may be drawn
      as Variables / deferred tensors.

  Returns:
    A `tfd.GaussianProcessRegressionModel` distribution.
  """
  # Draw the kernel first, with Variables disabled for the kernel itself.
  kernel, _ = draw(kernel_hps.base_kernels(
      kernel_name=kernel_name,
      batch_shape=batch_shape,
      event_dim=event_dim,
      feature_dim=feature_dim,
      feature_ndims=feature_ndims,
      # Disable variables
      enable_vars=False))
  compatible_batch_shape = draw(
      tfp_hps.broadcast_compatible_shape(kernel.batch_shape))

  def _draw_points(name):
    # Both index-point inputs share everything except their name.
    return draw(kernel_hps.kernel_input(
        batch_shape=compatible_batch_shape,
        example_ndims=1,
        feature_dim=feature_dim,
        feature_ndims=feature_ndims,
        enable_vars=enable_vars,
        name=name))

  index_points = _draw_points('index_points')
  hp.note('Index points:\n{}'.format(repr(index_points)))

  observation_index_points = _draw_points('observation_index_points')
  hp.note('Observation index points:\n{}'.format(
      repr(observation_index_points)))

  # Observations are scalar-featured and must match the example count of
  # `observation_index_points`.
  num_obs = int(observation_index_points.shape[-(feature_ndims + 1)])
  observations = draw(kernel_hps.kernel_input(
      batch_shape=compatible_batch_shape,
      example_ndims=1,
      example_dim=num_obs,
      # No feature dimensions.
      feature_dim=0,
      feature_ndims=0,
      enable_vars=enable_vars,
      name='observations'))
  hp.note('Observations:\n{}'.format(repr(observations)))

  params = draw(broadcasting_params(
      'GaussianProcessRegressionModel',
      compatible_batch_shape,
      event_dim=event_dim,
      enable_vars=enable_vars))
  hp.note('Params:\n{}'.format(repr(params)))

  return tfd.GaussianProcessRegressionModel(
      kernel=kernel,
      index_points=index_points,
      observation_index_points=observation_index_points,
      observations=observations,
      cholesky_fn=lambda m: marginal_fns.retrying_cholesky(m)[0],
      observation_noise_variance=params['observation_noise_variance'])