Example #1
  def testExcessiveConcretizationOfParamsWithReparameterization(self):
    logits = tfp_hps.defer_and_count_usage(self._build_variable(
        np.zeros(5), name='logits', static_rank=True))
    loc = tfp_hps.defer_and_count_usage(self._build_variable(
        np.zeros((4, 4, 5)), name='loc', static_rank=True))
    scale = tfp_hps.defer_and_count_usage(self._build_variable(
        1., name='scale', static_rank=True))
    dist = tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(logits=logits),
        components_distribution=tfd.Logistic(loc=loc, scale=scale),
        reparameterize=True, validate_args=True)

    # TODO(b/140579567): With reparameterization, there are additional reads of
    # the parameters of the underlying mixture and components distributions when
    # sampling, from calls in `_distributional_transform` to:
    #
    #  - `self.mixture_distribution.logits_parameter`
    #  - `self.components_distribution.log_prob`
    #  - `self.components_distribution.cdf`
    #
    # NOTE: In the unlikely case that samples have a statically-known rank but
    # the rank of `self.components_distribution.event_shape` is not known
    # statically, there can be additional reads in `_distributional_transform`
    # from calling `self.components_distribution.is_scalar_event`.

    with tfp_hps.assert_no_excessive_var_usage('sample', max_permissible=4):
      dist.sample(seed=test_util.test_seed())
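
This test relies on `tfp_hps.defer_and_count_usage` and
`tfp_hps.assert_no_excessive_var_usage` to count how often a variable is
concretized (read) during a single method call. The snippet below is a
minimal, library-free sketch of that counting idea; `CountedValue` and
`assert_no_excessive_usage` are hypothetical names for illustration, not the
actual TFP test-utility API.

import contextlib


class CountedValue(object):
  """Hypothetical stand-in for a deferred, usage-counted parameter."""

  def __init__(self, value):
    self.value = value
    self.num_reads = 0

  def read(self):
    self.num_reads += 1
    return self.value


@contextlib.contextmanager
def assert_no_excessive_usage(name, counted_values, max_permissible):
  """Hypothetical analogue of `tfp_hps.assert_no_excessive_var_usage`."""
  for cv in counted_values:
    cv.num_reads = 0
  yield
  for cv in counted_values:
    if cv.num_reads > max_permissible:
      raise AssertionError('{}: {} reads exceed the allowed {}'.format(
          name, cv.num_reads, max_permissible))


# Each call to `read()` models one concretization of a variable.
logits = CountedValue([0., 0., 0.])
with assert_no_excessive_usage('sample', [logits], max_permissible=4):
  for _ in range(3):
    logits.read()  # Three reads: within the permitted budget.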
Example #2
    def testExcessiveConcretizationOfParamsBatchShapeOverride(self):
        # Test methods that are not implemented if event_shape is overridden.
        loc = tfp_hps.defer_and_count_usage(
            tf.Variable(0., name='loc', dtype=tf.float32, shape=self.shape))
        scale = tfp_hps.defer_and_count_usage(
            tf.Variable(2., name='scale', dtype=tf.float32, shape=self.shape))
        bij_scale = tfp_hps.defer_and_count_usage(
            tf.Variable(2.,
                        name='bij_scale',
                        dtype=tf.float32,
                        shape=self.shape))
        batch_shape = tfp_hps.defer_and_count_usage(
            tf.Variable([4, 3, 5],
                        name='input_batch_shape',
                        dtype=tf.int32,
                        shape=self.shape))
        dist = tfd.TransformedDistribution(
            distribution=tfd.Normal(loc=loc, scale=scale, validate_args=True),
            bijector=tfb.Scale(scale=bij_scale, validate_args=True),
            batch_shape=batch_shape,
            validate_args=True)

        for method in ('log_cdf', 'cdf', 'survival_function',
                       'log_survival_function'):
            with tfp_hps.assert_no_excessive_var_usage(
                    method, max_permissible=self.max_permissible[method]):
                getattr(dist, method)(np.ones((4, 3, 2)) / 3.)

        with tfp_hps.assert_no_excessive_var_usage(
                'quantile', max_permissible=self.max_permissible['quantile']):
            dist.quantile(.1)
Example #3
  def testExcessiveConcretizationOfParams(self):
    logits = tfp_hps.defer_and_count_usage(
        self._build_variable(np.zeros((4, 4, 5)), name='logits'))
    concentration = tfp_hps.defer_and_count_usage(
        self._build_variable(np.zeros((4, 4, 5, 3)), name='concentration'))
    dist = tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(logits=logits),
        components_distribution=tfd.Dirichlet(concentration=concentration),
        validate_args=True)

    # Many methods use mixture_distribution and components_distribution at most
    # once, and thus incur no extra reads/concretizations of parameters.

    for method in ('batch_shape_tensor', 'event_shape_tensor',
                   'mean'):
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=2):
        getattr(dist, method)()

    with tfp_hps.assert_no_excessive_var_usage('sample', max_permissible=2):
      dist.sample(seed=test_util.test_seed())

    for method in ('log_prob', 'prob'):
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=2):
        getattr(dist, method)(np.ones((4, 4, 3)) / 3.)

    # TODO(b/140579567): The `variance()` and `covariance()` methods require
    # calling both:
    #  - `self.components_distribution.mean()`
    #  - `self.components_distribution.variance()` or `.covariance()`
    # Thus, these methods incur an additional concretization (or two if
    # `validate_args=True` for `self.components_distribution`).

    for method in ('variance', 'covariance'):
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=3):
        getattr(dist, method)()
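
The TODO above is the law of total variance in action: a mixture's variance
combines the components' variances with the spread of the components' means,
so `variance()` must read both. A standalone numpy check of that identity
(illustration only, not part of the test):

import numpy as np

w = np.array([0.3, 0.7])   # mixture weights
m = np.array([0.0, 2.0])   # component means
v = np.array([1.0, 0.5])   # component variances
mixture_mean = np.sum(w * m)
# Var = E[Var(X|Z)] + Var(E[X|Z])
mixture_var = np.sum(w * v) + np.sum(w * (m - mixture_mean) ** 2)
print(mixture_mean, mixture_var)  # 1.4 1.49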
Example #4
  def testExcessiveConcretizationWithDefaultReinterpretedBatchNdims(self):
    loc = tfp_hps.defer_and_count_usage(
        tf.Variable(np.zeros((5, 2, 3)), shape=tf.TensorShape(None)))
    scale = tfp_hps.defer_and_count_usage(
        tf.Variable(np.ones([]), shape=tf.TensorShape(None)))
    dist = tfd.Independent(
        tfd.Logistic(loc=loc, scale=scale, validate_args=True),
        reinterpreted_batch_ndims=None, validate_args=True)

    for method in ('batch_shape_tensor', 'event_shape_tensor',
                   'mean', 'variance', 'sample'):
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=4):
        getattr(dist, method)()

    # In addition to the four reads of `loc`, `scale` described above in
    # `testExcessiveConcretizationOfParams`, the methods below have two more
    # reads of these parameters -- from computing a default value for
    # `reinterpreted_batch_ndims`, which requires calling
    # `dist.distribution.batch_shape_tensor()`.

    for method in ('log_prob', 'log_cdf', 'prob', 'cdf'):
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=6):
        getattr(dist, method)(np.zeros((4, 5, 2, 3)))

    with tfp_hps.assert_no_excessive_var_usage('entropy', max_permissible=6):
      dist.entropy()

    # `Distribution.survival_function` and `Distribution.log_survival_function`
    # will call `Distribution.cdf` and `Distribution.log_cdf`, resulting in
    # one additional call to `Independent._parameter_control_dependencies`,
    # and thus two additional concretizations of the parameters.

    for method in ('survival_function', 'log_survival_function'):
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=8):
        getattr(dist, method)(np.zeros((4, 5, 2, 3)))
Example #5
  def testExcessiveConcretizationOfParams(self):
    loc = tfp_hps.defer_and_count_usage(
        tf.Variable(0., name='loc', dtype=tf.float32, shape=self.shape))
    scale = tfp_hps.defer_and_count_usage(
        tf.Variable(2., name='scale', dtype=tf.float32, shape=self.shape))
    bij_scale = tfp_hps.defer_and_count_usage(
        tf.Variable(2., name='bij_scale', dtype=tf.float32, shape=self.shape))
    event_shape = tfp_hps.defer_and_count_usage(
        tf.Variable([2, 2], name='input_event_shape', dtype=tf.int32,
                    shape=self.shape))
    batch_shape = tfp_hps.defer_and_count_usage(
        tf.Variable([4, 3, 5], name='input_batch_shape', dtype=tf.int32,
                    shape=self.shape))

    dist = tfd.TransformedDistribution(
        distribution=tfd.Normal(loc=loc, scale=scale, validate_args=True),
        bijector=tfb.Scale(scale=bij_scale, validate_args=True),
        event_shape=event_shape,
        batch_shape=batch_shape,
        validate_args=True)

    for method in ('mean', 'entropy', 'event_shape_tensor',
                   'batch_shape_tensor'):
      with tfp_hps.assert_no_excessive_var_usage(
          method, max_permissible=self.max_permissible[method]):
        getattr(dist, method)()

    with tfp_hps.assert_no_excessive_var_usage(
        'sample', max_permissible=self.max_permissible['sample']):
      dist.sample(seed=test_util.test_seed())

    for method in ('log_prob', 'prob'):
      with tfp_hps.assert_no_excessive_var_usage(
          method, max_permissible=self.max_permissible[method]):
        getattr(dist, method)(np.ones((4, 3, 5, 2, 2)) / 3.)
Example #6
    def testExcessiveConcretizationOfParams(self):
        logits = tfp_hps.defer_and_count_usage(
            tf.Variable(np.zeros((3, 5, 2)),
                        dtype=tf.float32,
                        shape=tf.TensorShape([None, None, 2]),
                        name='logits'))
        concentration = tfp_hps.defer_and_count_usage(
            tf.Variable(np.ones((3, 5, 4)),
                        dtype=tf.float32,
                        shape=tf.TensorShape(None),
                        name='concentration'))
        loc = tfp_hps.defer_and_count_usage(
            tf.Variable(np.zeros((3, 5, 4)),
                        dtype=tf.float32,
                        shape=tf.TensorShape(None),
                        name='loc'))
        scale = tfp_hps.defer_and_count_usage(
            tf.Variable(1.,
                        dtype=tf.float32,
                        shape=tf.TensorShape(None),
                        name='scale'))

        dist = tfd.Mixture(tfd.Categorical(logits=logits),
                           components=[
                               tfd.Dirichlet(concentration),
                               tfd.Independent(tfd.Normal(loc=loc,
                                                          scale=scale),
                                               reinterpreted_batch_ndims=1)
                           ],
                           use_static_graph=self.use_static_graph,
                           validate_args=True)

        for method in ('batch_shape_tensor', 'event_shape_tensor',
                       'entropy_lower_bound'):
            with tfp_hps.assert_no_excessive_var_usage(method,
                                                       max_permissible=2):
                getattr(dist, method)()

        with tfp_hps.assert_no_excessive_var_usage('sample',
                                                   max_permissible=2):
            dist.sample(seed=test_util.test_seed())

        for method in ('prob', 'log_prob'):
            with tfp_hps.assert_no_excessive_var_usage(method,
                                                       max_permissible=2):
                getattr(dist, method)(tf.ones((3, 5, 4)) / 4.)

        # TODO(b/140579567): The `stddev()` and `variance()` methods require
        # calling both:
        #  - `self.components[i].mean()`
        #  - `self.components[i].stddev()`
        # Thus, these methods incur an additional concretization (or two if
        # `validate_args=True` for `self.components[i]`).

        for method in ('stddev', 'variance'):
            with tfp_hps.assert_no_excessive_var_usage(method,
                                                       max_permissible=3):
                getattr(dist, method)()
Example #7
    def testExcessiveConcretizationOfParams(self):
        loc = tfp_hps.defer_and_count_usage(
            tf.Variable(np.zeros((4, 2, 2)), shape=tf.TensorShape(None)))
        scale = tfp_hps.defer_and_count_usage(
            tf.Variable(np.ones([]), shape=tf.TensorShape(None)))
        ndims = tf.Variable(1, trainable=False, shape=tf.TensorShape(None))
        dist = tfd.Independent(tfd.Logistic(loc=loc,
                                            scale=scale,
                                            validate_args=True),
                               reinterpreted_batch_ndims=ndims,
                               validate_args=True)

        # TODO(b/140579567): All methods of `dist` may require four concretizations
        # of parameters `loc` and `scale`:
        #  - `Independent._parameter_control_dependencies` calls
        #    `Logistic.batch_shape_tensor`, which:
        #    * Reads `loc`, `scale` in `Logistic._parameter_control_dependencies`.
        #    * Reads `loc`, `scale` in `Logistic._batch_shape_tensor`.
        #  - The method `dist.m` will call `dist.distribution.m`, which:
        #    * Reads `loc`, `scale` in `Logistic._parameter_control_dependencies`.
        #    * Reads `loc`, `scale` in the implementation of method `Logistic._m`.
        #
        # NOTE: If `dist.distribution` had dynamic batch shape and event shape,
        # there could be two more reads of the parameters of `dist.distribution`
        # in `dist.event_shape_tensor`, from calling
        # `dist.distribution.event_shape_tensor()`.

        for method in ('batch_shape_tensor', 'event_shape_tensor', 'mode',
                       'stddev', 'entropy'):
            with tfp_hps.assert_no_excessive_var_usage(method,
                                                       max_permissible=4):
                getattr(dist, method)()

        with tfp_hps.assert_no_excessive_var_usage('sample',
                                                   max_permissible=4):
            dist.sample(seed=test_util.test_seed())

        for method in ('log_prob', 'log_cdf', 'prob', 'cdf'):
            with tfp_hps.assert_no_excessive_var_usage(method,
                                                       max_permissible=4):
                getattr(dist, method)(np.zeros((3, 4, 2, 2)))

        # `Distribution.survival_function` and `Distribution.log_survival_function`
        # will call `Distribution.cdf` and `Distribution.log_cdf`, resulting in
        # one additional call to `Independent._parameter_control_dependencies`,
        # and thus two additional concretizations of the parameters.

        for method in ('survival_function', 'log_survival_function'):
            with tfp_hps.assert_no_excessive_var_usage(method,
                                                       max_permissible=6):
                getattr(dist, method)(np.zeros((3, 4, 2, 2)))
Example #8
def kernel_input(draw,
                 batch_shape,
                 example_dim=None,
                 example_ndims=None,
                 feature_dim=None,
                 feature_ndims=None,
                 enable_vars=False,
                 name=None):
    """Strategy for drawing arbitrary Kernel input.

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    batch_shape: `TensorShape`. The batch shape of the resulting
      kernel input.
    example_dim: Optional Python int giving the size of each example dimension.
      If omitted, Hypothesis will choose one.
    example_ndims: Optional Python int giving the number of example dimensions
      of the input. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: If `False`, the returned parameters are all Tensors, never
      Variables or DeferredTensor.
    name: Name to give the variable.

  Returns:
    kernel_input: A strategy for drawing kernel_input with the prescribed shape
      (or an arbitrary one if omitted).
  """
    if example_ndims is None:
        example_ndims = draw(hps.integers(min_value=1, max_value=4))
    if example_dim is None:
        example_dim = draw(hps.integers(min_value=2, max_value=6))

    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=1, max_value=4))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))

    input_shape = batch_shape
    input_shape += [example_dim] * example_ndims
    input_shape += [feature_dim] * feature_ndims
    # We would like kernel inputs to be unique. This is to avoid computing kernel
    # matrices that are semi-definite.
    x = draw(
        hpnp.arrays(dtype=np.float64,
                    shape=input_shape.as_list(),
                    elements=hps.floats(-50,
                                        50,
                                        allow_nan=False,
                                        allow_infinity=False),
                    unique=True))
    if enable_vars and draw(hps.booleans()):
        x = tf.Variable(x, name=name)
        if draw(hps.booleans()):
            x = tfp_hps.defer_and_count_usage(x)
    return x
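
Assuming `kernel_input` is decorated with `@hps.composite` (its docstring says
`draw` is supplied by that decorator), a property-based test can draw from it
through `hps.data()`. The test below is illustrative only; its name and the
shape check are not part of the library.

import hypothesis as hp
import hypothesis.strategies as hps
import numpy as np
import tensorflow as tf


@hp.given(hps.data())
def test_kernel_input_has_expected_rank(data):
    batch_shape = tf.TensorShape([2, 3])
    x = data.draw(kernel_input(batch_shape=batch_shape,
                               example_ndims=1,
                               feature_ndims=1))
    # 2 batch dims + 1 example dim + 1 feature dim.
    assert len(np.asarray(x).shape) == 4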
Example #9
  def testConcretizationLimits(self):
    shape_out = tfp_hps.defer_and_count_usage(tf.Variable([1]))
    reshape = tfb.Reshape(shape_out, validate_args=True)
    x = [1]  # Pun: valid input or output, and valid input or output shape
    for method in ['forward', 'inverse', 'forward_event_shape',
                   'inverse_event_shape', 'forward_event_shape_tensor',
                   'inverse_event_shape_tensor']:
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=7):
        getattr(reshape, method)(x)
    for method in ['forward_log_det_jacobian', 'inverse_log_det_jacobian']:
      with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=4):
        getattr(reshape, method)(x, event_ndims=1)
Example #10
def schur_complements(draw,
                      batch_shape=None,
                      event_dim=None,
                      feature_dim=None,
                      feature_ndims=None,
                      enable_vars=None,
                      depth=None):
    """Strategy for drawing `SchurComplement` kernels.

  The underlying kernel is drawn from the `kernels` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `SchurComplement` kernels with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    base_kernel, kernel_variable_names = draw(
        kernels(batch_shape=batch_shape,
                event_dim=event_dim,
                feature_dim=feature_dim,
                feature_ndims=feature_ndims,
                enable_vars=False,
                depth=depth - 1))

    # SchurComplement requires the inputs to have one example dimension.
    fixed_inputs = draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=1,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims))
    # Positive shift to ensure the divisor matrix is PD.
    diag_shift = np.float64(
        draw(
            hpnp.arrays(dtype=np.float64,
                        shape=batch_shape.as_list(),
                        elements=hps.floats(1,
                                            100,
                                            allow_nan=False,
                                            allow_infinity=False))))

    hp.note('Forming SchurComplement kernel with fixed_inputs: {} '
            'and diag_shift: {}'.format(fixed_inputs, diag_shift))

    schur_complement_params = {
        'fixed_inputs': fixed_inputs,
        'diag_shift': diag_shift
    }
    for param_name in schur_complement_params:
        if enable_vars and draw(hps.booleans()):
            kernel_variable_names.append(param_name)
            schur_complement_params[param_name] = tf.Variable(
                schur_complement_params[param_name], name=param_name)
            if draw(hps.booleans()):
                schur_complement_params[
                    param_name] = tfp_hps.defer_and_count_usage(
                        schur_complement_params[param_name])
    result_kernel = tfp.positive_semidefinite_kernels.SchurComplement(
        base_kernel=base_kernel,
        fixed_inputs=schur_complement_params['fixed_inputs'],
        diag_shift=schur_complement_params['diag_shift'],
        validate_args=True)
    return result_kernel, kernel_variable_names
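
The `diag_shift` drawn above is what keeps the divisor matrix invertible: with
duplicated fixed inputs the Gram matrix is only positive semi-definite, and a
positive diagonal shift makes it strictly positive definite. A standalone
numpy illustration (not part of the strategy, using a Gaussian/RBF kernel as a
stand-in for the drawn base kernel):

import numpy as np

x = np.array([[0.], [0.], [1.]])        # two identical fixed inputs
gram = np.exp(-0.5 * (x - x.T) ** 2)    # RBF kernel matrix, rank-deficient
print(np.linalg.eigvalsh(gram).min())   # ~0: only semi-definite
diag_shift = 1e-3
shifted = gram + diag_shift * np.eye(3)
print(np.linalg.eigvalsh(shifted).min())  # > 0: strictly positive definite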
Example #11
def spectral_mixtures(draw,
                      batch_shape=None,
                      event_dim=None,
                      feature_dim=None,
                      feature_ndims=None,
                      enable_vars=None,
                      depth=None):
    """Strategy for drawing `SpectralMixture` kernels.

  The mixture's logits, locs and scales are drawn using the `kernel_input`
  strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `SpectralMixture` kernels with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    num_mixtures = draw(hps.integers(min_value=2, max_value=5))

    logits = draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=0,
                     feature_dim=num_mixtures,
                     feature_ndims=1))

    locs = draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=1,
                     example_dim=num_mixtures,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims))

    scales = tfp_hps.softplus_plus_eps()(draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=1,
                     example_dim=num_mixtures,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims)))

    hp.note(f'Forming SpectralMixture kernel with logits: {logits} '
            f'locs: {locs} and scales: {scales}')

    spectral_mixture_params = {
        'locs': locs,
        'logits': logits,
        'scales': scales
    }

    kernel_variable_names = []
    for param_name in spectral_mixture_params:
        if enable_vars and draw(hps.booleans()):
            kernel_variable_names.append(param_name)
            spectral_mixture_params[param_name] = tf.Variable(
                spectral_mixture_params[param_name], name=param_name)
            if draw(hps.booleans()):
                spectral_mixture_params[
                    param_name] = tfp_hps.defer_and_count_usage(
                        spectral_mixture_params[param_name])
    result_kernel = tfpk.SpectralMixture(
        logits=spectral_mixture_params['logits'],
        locs=spectral_mixture_params['locs'],
        scales=spectral_mixture_params['scales'],
        feature_ndims=feature_ndims,
        validate_args=True)
    return result_kernel, kernel_variable_names
Example #12
def kumaraswamy_transformeds(draw,
                             batch_shape=None,
                             event_dim=None,
                             feature_dim=None,
                             feature_ndims=None,
                             enable_vars=None,
                             depth=None):
    """Strategy for drawing `KumaraswamyTransformed` kernels.

  The underlying kernel is drawn from the `kernels` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `KumaraswamyTransformed` kernels with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    base_kernel, _ = draw(
        kernels(batch_shape=batch_shape,
                event_dim=event_dim,
                feature_dim=feature_dim,
                feature_ndims=feature_ndims,
                enable_vars=False,
                depth=depth - 1))

    concentration1 = constrain_to_range(1., 2.)(draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=0,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims)))

    concentration0 = constrain_to_range(1., 2.)(draw(
        kernel_input(batch_shape=batch_shape,
                     example_ndims=0,
                     feature_dim=feature_dim,
                     feature_ndims=feature_ndims)))

    concentrations = {
        'concentration1': concentration1,
        'concentration0': concentration0
    }

    kernel_variable_names = []

    for param_name in concentrations:
        if enable_vars and draw(hps.booleans()):
            kernel_variable_names.append(param_name)
            concentrations[param_name] = tf.Variable(
                concentrations[param_name], name=param_name)
            if draw(hps.booleans()):
                concentrations[param_name] = tfp_hps.defer_and_count_usage(
                    concentrations[param_name])

    hp.note('Forming KumaraswamyTransformed kernel with '
            'concentrations: {}'.format(concentrations))

    # We compose with a FeatureTransformed to ensure inputs are positive to
    # Kumaraswamy.

    result_kernel = tfpk.KumaraswamyTransformed(kernel=base_kernel,
                                                validate_args=True,
                                                **concentrations)
    result_kernel = ConstrainToUnit(kernel=result_kernel, validate_args=True)
    return result_kernel, kernel_variable_names
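
The composition with `ConstrainToUnit` above matters because the Kumaraswamy
CDF, 1 - (1 - x**a)**b, is only a well-defined warping for x in [0, 1]. A
short numpy sketch of that CDF (assuming, as the class name suggests, that
`KumaraswamyTransformed` warps each feature through it; the helper below is
purely illustrative):

import numpy as np

def kumaraswamy_cdf(x, concentration1, concentration0):
    # Only valid for x in [0, 1]; outside that range the power terms misbehave.
    return 1. - (1. - x ** concentration1) ** concentration0

x = np.linspace(0., 1., 5)
print(kumaraswamy_cdf(x, concentration1=1.5, concentration0=2.0))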
Example #13
def kernel_input(draw,
                 batch_shape,
                 example_dim=None,
                 example_ndims=None,
                 feature_dim=None,
                 feature_ndims=None,
                 enable_vars=False,
                 name=None):
    """Strategy for drawing arbitrary Kernel input.

  In order to avoid duplicates (or even numerically near-duplicates), we
  generate inputs on a grid. We let hypothesis generate the number of grid
  points and distance between grid points, within some reasonable pre-defined
  ranges. The result will be a batch of example sets, within which each set of
  examples has no duplicates (but no such duplication avoidance is applied
  across batches).

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    batch_shape: `TensorShape`. The batch shape of the resulting
      kernel input.
    example_dim: Optional Python int giving the size of each example dimension.
      If omitted, Hypothesis will choose one.
    example_ndims: Optional Python int giving the number of example dimensions
      of the input. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: If `False`, the returned parameters are all Tensors, never
      Variables or DeferredTensor.
    name: Name to give the variable.

  Returns:
    kernel_input: A strategy for drawing kernel_input with the prescribed shape
      (or an arbitrary one if omitted).
  """
    if example_ndims is None:
        example_ndims = draw(hps.integers(min_value=1, max_value=2))
    if example_dim is None:
        example_dim = draw(hps.integers(min_value=2, max_value=4))

    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=1, max_value=2))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=4))

    batch_shape = tensorshape_util.as_list(batch_shape)
    example_shape = [example_dim] * example_ndims
    feature_shape = [feature_dim] * feature_ndims

    batch_size = int(np.prod(batch_shape))
    example_size = example_dim**example_ndims
    feature_size = feature_dim**feature_ndims

    # We would like each batch of examples to be unique, to avoid computing kernel
    # matrices that are semi-definite. hypothesis.extra.numpy.arrays doesn't have
    # a sense of tolerance, so we need to do some extra work to get points
    # sufficiently far from each other.
    grid_size = draw(hps.integers(min_value=10, max_value=100))
    grid_spacing = draw(hps.floats(min_value=1e-2, max_value=2))
    hp.note('Grid size {} and spacing {}'.format(grid_size, grid_spacing))

    def _grid_indices_to_values(grid_indices):
        return (
            grid_spacing *
            (np.array(grid_indices, dtype=np.float64) - np.float64(grid_size)))

    # We'll construct the result by stacking onto flattened batch, example and
    # feature dims, then reshape to unflatten at the end.
    result = np.zeros([0, example_size, feature_size])
    for _ in range(batch_size):
        seen = set()
        index_array_strategy = hps.tuples(
            *([hps.integers(0, grid_size + 1)] * feature_size)).filter(
                lambda x, seen=seen: x not in seen
            )  # Default param to sate pylint.
        examples = np.zeros([1, 0, feature_size])
        for _ in range(example_size):
            feature_grid_locations = draw(index_array_strategy)
            seen.add(feature_grid_locations)
            example = _grid_indices_to_values(feature_grid_locations)
            example = example[np.newaxis, np.newaxis, ...]
            examples = np.concatenate([examples, example], axis=1)
        result = np.concatenate([result, examples], axis=0)
    result = np.reshape(result, batch_shape + example_shape + feature_shape)

    if enable_vars and draw(hps.booleans()):
        result = tf.Variable(result, name=name)
        if draw(hps.booleans()):
            result = tfp_hps.defer_and_count_usage(result)
    return result
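
A small numpy illustration (independent of the strategy above) of the grid
idea from the docstring: distinct integer index tuples, scaled by a fixed
spacing, map to points whose pairwise distances are bounded below by that
spacing, so no two examples within a batch are numerically close.

import numpy as np

grid_size, grid_spacing = 10, 0.5
indices = np.array([[0, 3], [2, 7], [9, 1]], dtype=np.float64)  # distinct tuples
points = grid_spacing * (indices - grid_size)   # mirrors _grid_indices_to_values
diffs = points[:, None, :] - points[None, :, :]
distances = np.sqrt((diffs ** 2).sum(axis=-1))
print(points)
print(distances)  # every off-diagonal entry is >= grid_spacing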