def test_sample(self, data):
        batch_shape = data.draw(tfp_hps.shapes())
        bcast_arg, dist_batch_shp = data.draw(
            tfp_hps.broadcasting_shapes(batch_shape, 2))

        underlying = tfd.Normal(loc=tf.reshape(
            tf.range(float(np.prod(tensorshape_util.as_list(dist_batch_shp)))),
            dist_batch_shp),
                                scale=0.01)

        if not self.is_static_shape:
            bcast_arg = tf.Variable(bcast_arg)
            self.evaluate(bcast_arg.initializer)
        dist = tfd.BatchBroadcast(underlying, bcast_arg)
        sample_shape = data.draw(
            hps.one_of(hps.integers(0, 13), tfp_hps.shapes()))
        sample_batch_event = tf.concat([
            np.int32(sample_shape).reshape([-1]), batch_shape,
            dist.event_shape_tensor()
        ],
                                       axis=0)
        sample = dist.sample(sample_shape, seed=test_util.test_seed())
        if self.is_static_shape:
            self.assertEqual(tf.TensorShape(self.evaluate(sample_batch_event)),
                             sample.shape)
        self.assertAllEqual(sample_batch_event, tf.shape(sample))
        # Since the `loc` of the underlying is simply 0...n-1 (reshaped), and the
        # scale is extremely small, then we can verify that these locations are
        # effectively broadcast out to the full batch shape when sampling.
        self.assertAllClose(tf.broadcast_to(dist.distribution.loc,
                                            sample_batch_event),
                            sample,
                            atol=.1)
def independents(draw,
                 batch_shape=None,
                 event_dim=None,
                 enable_vars=False,
                 depth=None):
    """Strategy for drawing `Independent` distributions.

  The underlying distribution is drawn from the `distributions` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `Independent` distribution.  Note that the underlying distribution will in
      general have a higher-rank batch shape, to make room for reinterpreting
      some of those dimensions as the `Independent`'s event.  Hypothesis will
      pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `Independent` distributions with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    reinterpreted_batch_ndims = draw(hps.integers(min_value=0, max_value=2))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes(min_ndims=reinterpreted_batch_ndims))
    else:  # This independent adds some batch dims to its underlying distribution.
        batch_shape = tensorshape_util.concatenate(
            batch_shape,
            draw(
                tfp_hps.shapes(min_ndims=reinterpreted_batch_ndims,
                               max_ndims=reinterpreted_batch_ndims)))
    underlying = draw(
        distributions(batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      depth=depth - 1))
    logging.info(
        'underlying distribution: %s; parameters used: %s', underlying,
        [k for k, v in six.iteritems(underlying.parameters) if v is not None])
    result_dist = tfd.Independent(
        underlying,
        reinterpreted_batch_ndims=reinterpreted_batch_ndims,
        validate_args=True)
    expected_shape = batch_shape[:len(batch_shape) - reinterpreted_batch_ndims]
    if expected_shape != result_dist.batch_shape:
        msg = ('Independent strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist,
                                                      expected_shape)
        raise AssertionError(msg)
    return result_dist
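# Hedged usage sketch (illustrative, not from the original module): how a
# strategy like `independents` above is typically consumed in a Hypothesis
# test.  Assumes the strategy is decorated with `@hps.composite`, as is
# conventional in TFP's hypothesis testlibs; the test name and the fixed batch
# shape below are arbitrary.
import hypothesis as hp
from hypothesis import strategies as hps
import tensorflow.compat.v2 as tf


@hp.given(hps.data())
def test_independents_batch_shape_sketch(data):
    dist = data.draw(independents(batch_shape=tf.TensorShape([3, 2])))
    # The strategy already asserts this internally; a consumer may rely on it.
    assert dist.batch_shape == tf.TensorShape([3, 2])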
Example #3
def rq_splines(draw, batch_shape=None, dtype=tf.float32):
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())

    lo = draw(hps.floats(min_value=-5, max_value=.5))
    hi = draw(hps.floats(min_value=-.5, max_value=5))
    lo, hi = min(lo, hi), max(lo, hi) + .2
    hp.note('lo, hi: {!r}'.format((lo, hi)))

    constraints = dict(
        bin_widths=functools.partial(bijector_hps.spline_bin_size_constraint,
                                     hi=hi,
                                     lo=lo,
                                     dtype=dtype),
        bin_heights=functools.partial(bijector_hps.spline_bin_size_constraint,
                                      hi=hi,
                                      lo=lo,
                                      dtype=dtype),
        knot_slopes=functools.partial(bijector_hps.spline_slope_constraint,
                                      dtype=dtype))
    params = draw(
        tfp_hps.broadcasting_params(batch_shape,
                                    params_event_ndims=dict(bin_widths=1,
                                                            bin_heights=1,
                                                            knot_slopes=1),
                                    constraint_fn_for=constraints.get))
    hp.note('params: {!r}'.format(params))
    return tfb.RationalQuadraticSpline(range_min=lo,
                                       validate_args=draw(hps.booleans()),
                                       **params)
    def check_event_space_bijector_constrains(self, dist, data):
        event_space_bijector = dist.experimental_default_event_space_bijector()
        if event_space_bijector is None:
            return

        total_sample_shape = tensorshape_util.concatenate(
            # Draw a sample shape
            data.draw(tfp_hps.shapes()),
            # Draw a shape that broadcasts with `[batch_shape, inverse_event_shape]`
            # where `inverse_event_shape` is the event shape in the bijector's
            # domain. This is the shape of `y` in R**n, such that
            # x = event_space_bijector(y) has the event shape of the distribution.
            data.draw(
                tfp_hps.broadcasting_shapes(tensorshape_util.concatenate(
                    dist.batch_shape,
                    event_space_bijector.inverse_event_shape(
                        dist.event_shape)),
                                            n=1))[0])

        y = data.draw(
            tfp_hps.constrained_tensors(tfp_hps.identity_fn,
                                        total_sample_shape.as_list()))
        with tfp_hps.no_tf_rank_errors():
            x = event_space_bijector(y)
            with tf.control_dependencies(dist._sample_control_dependencies(x)):
                self.evaluate(tf.identity(x))
 def testTheoreticalFldj(self, data):
     # get_fldj_theoretical test rig requires 1-d batches.
     batch_shape = data.draw(tfp_hps.shapes(min_ndims=1, max_ndims=1))
     bijector = data.draw(
         rq_splines(batch_shape=batch_shape, dtype=tf.float64))
     self.assertEqual(tf.float64, bijector.dtype)
     bw, bh, kd = self.evaluate(
         [bijector.bin_widths, bijector.bin_heights, bijector.knot_slopes])
     logging.info('bw: %s\nbh: %s\nkd: %s', bw, bh, kd)
     x_shp = ((bw + bh)[..., :-1] + kd).shape[:-1]
     if x_shp[-1] == 1:  # Possibly broadcast the x dim.
         dim = data.draw(hps.integers(min_value=1, max_value=7))
         x_shp = x_shp[:-1] + (dim, )
     x = np.linspace(-5, 5, np.prod(x_shp),
                     dtype=np.float64).reshape(*x_shp)
     y = self.evaluate(bijector.forward(x))
     bijector_test_util.assert_bijective_and_finite(bijector,
                                                    x,
                                                    y,
                                                    eval_func=self.evaluate,
                                                    event_ndims=0,
                                                    inverse_event_ndims=0,
                                                    rtol=1e-5)
     fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
     fldj_theoretical = bijector_test_util.get_fldj_theoretical(
         bijector, x, event_ndims=0)
     self.assertAllClose(self.evaluate(fldj_theoretical),
                         self.evaluate(fldj),
                         atol=1e-5,
                         rtol=1e-5)
    def testTheoreticalFldj(self, data):
        dim = data.draw(hps.integers(min_value=0, max_value=10))
        diag_bijector = data.draw(
            bijector_hps.unconstrained_bijectors(
                max_forward_event_ndims=1,
                must_preserve_event_ndims=True).filter(
                    _preserves_vector_dim(dim)))
        logging.info('Using diagonal bijector %s %s', diag_bijector.name,
                     diag_bijector)

        bijector = tfb.TransformDiagonal(diag_bijector=diag_bijector)
        ensure_nonzero_batch = lambda shape: [d if d > 0 else 1 for d in shape]
        shape = data.draw(
            tfp_hps.shapes().map(ensure_nonzero_batch)) + [dim, dim]
        x = np.random.randn(*shape).astype(np.float64)
        y = self.evaluate(bijector.forward(x))
        bijector_test_util.assert_bijective_and_finite(bijector,
                                                       x,
                                                       y,
                                                       eval_func=self.evaluate,
                                                       event_ndims=2,
                                                       inverse_event_ndims=2,
                                                       rtol=1e-5)
        fldj = bijector.forward_log_det_jacobian(x, event_ndims=2)
        # For constant-jacobian bijectors, the zero fldj may not be broadcast.
        fldj = fldj + tf.zeros(tf.shape(x)[:-2], dtype=x.dtype)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector, x, event_ndims=2, inverse_event_ndims=2)
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
Example #7
def spherical_uniforms(draw,
                       batch_shape=None,
                       event_dim=None,
                       validate_args=True):
    """Strategy for drawing `SphericalUniform` distributions.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `SphericalUniform` distribution.
    event_dim: Optional Python int giving the size of the
      distribution's event dimension.
    validate_args: Python `bool`; whether to enable runtime assertions.

  Returns:
    dists: A strategy for drawing `SphericalUniform` distributions with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes(min_ndims=0, max_side=4))
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=1, max_value=10))

    result_dist = tfd.SphericalUniform(dimension=event_dim,
                                       batch_shape=batch_shape,
                                       validate_args=validate_args)
    return result_dist
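# Hedged sketch (illustrative, not from the original module): the distribution
# this strategy produces can also be built directly.  A `SphericalUniform` with
# `dimension=3` samples unit vectors in R^3, so its event shape is `[3]`.
import tensorflow_probability as tfp
tfd = tfp.distributions

sphere = tfd.SphericalUniform(dimension=3, batch_shape=[2], validate_args=True)
print(sphere.event_shape)  # [3]
print(sphere.batch_shape)  # [2]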
def domain_tensors(draw, bijector, shape=None):
  """Strategy for drawing Tensors in the domain of a bijector.

  If the bijector's domain is constrained, this proceeds by drawing an
  unconstrained Tensor and then transforming it to fit.  The constraints are
  declared in `bijectors.hypothesis_testlib.bijector_supports`.  The
  transformations are defined by `tfp_hps.constrainer`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector: A `Bijector` in whose domain the Tensors will be.
    shape: An optional `TensorShape`.  The shape of the resulting
      Tensors.  Hypothesis will pick one if omitted.

  Returns:
    tensors: A strategy for drawing domain Tensors for the desired bijector.
  """
  if is_invert(bijector):
    return draw(codomain_tensors(bijector.bijector, shape))
  if shape is None:
    shape = draw(tfp_hps.shapes())
  bijector_name = type(bijector).__name__
  support = bijector_hps.bijector_supports()[bijector_name].forward
  if isinstance(bijector, tfb.PowerTransform):
    constraint_fn = bijector_hps.power_transform_constraint(bijector.power)
  else:
    constraint_fn = tfp_hps.constrainer(support)
  return draw(tfp_hps.constrained_tensors(constraint_fn, shape))
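# Hedged usage sketch (illustrative, not from the original module): drawing a
# Tensor in a bijector's domain inside a Hypothesis test.  Assumes
# `domain_tensors` is decorated with `@hps.composite` and that the chosen
# bijector ('Softplus' here, an arbitrary example) is registered in
# `bijector_supports()`.
import hypothesis as hp
from hypothesis import strategies as hps
import tensorflow.compat.v2 as tf
from tensorflow_probability import bijectors as tfb


@hp.given(hps.data())
def test_domain_tensor_sketch(data):
    bij = tfb.Softplus()
    x = data.draw(domain_tensors(bij, shape=tf.TensorShape([4])))
    y = bij.forward(x)  # x lies in the declared forward domain, so y is finite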
Example #9
def codomain_tensors(draw, bijector, shape=None):
    """Strategy for drawing Tensors in the codomain of a bijector.

  If the bijector's codomain is constrained, this proceeds by drawing an
  unconstrained Tensor and then transforming it to fit.  The constraints are
  declared in `bijectors.hypothesis_testlib.bijector_supports`.  The
  transformations are defined by `tfp_hps.constrainer`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector: A `Bijector` in whose codomain the Tensors will be.
    shape: An optional `TensorShape`.  The shape of the resulting
      Tensors.  Hypothesis will pick one if omitted.

  Returns:
    tensors: A strategy for drawing codomain Tensors for the desired bijector.
  """
    if is_invert(bijector):
        return draw(domain_tensors(bijector.bijector, shape))
    elif is_transform_diagonal(bijector):
        return draw(codomain_tensors(bijector.diag_bijector, shape))
    if shape is None:
        shape = draw(tfp_hps.shapes())
    bijector_name = type(bijector).__name__
    support = bhps.bijector_supports()[bijector_name].inverse
    if is_generalized_pareto(bijector):
        constraint_fn = bhps.generalized_pareto_constraint(
            bijector.loc, bijector.scale, bijector.concentration)
    elif isinstance(bijector, tfb.SoftClip):
        constraint_fn = bhps.softclip_constraint(bijector.low, bijector.high)
    else:
        constraint_fn = tfp_hps.constrainer(support)
    return draw(tfp_hps.constrained_tensors(constraint_fn, shape))
def base_kernels(draw,
                 kernel_name=None,
                 batch_shape=None,
                 event_dim=None,
                 feature_ndims=None,
                 enable_vars=False):
    if kernel_name is None:
        kernel_name = draw(hps.sampled_from(sorted(INSTANTIABLE_BASE_KERNELS)))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    kernel_params = draw(
        broadcasting_params(kernel_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    kernel_variable_names = [
        k for k in kernel_params if tensor_util.is_ref(kernel_params[k])
    ]
    hp.note('Forming kernel {} with constrained parameters {}'.format(
        kernel_name, kernel_params))
    ctor = getattr(tfpk, kernel_name)
    result_kernel = ctor(validate_args=True,
                         feature_ndims=feature_ndims,
                         **kernel_params)
    if batch_shape != result_kernel.batch_shape:
        msg = ('Kernel strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_kernel,
                                                      batch_shape)
        raise AssertionError(msg)
    return result_kernel, kernel_variable_names
Example #11
def feature_transformeds(draw,
                         batch_shape=None,
                         event_dim=None,
                         feature_dim=None,
                         feature_ndims=None,
                         enable_vars=None,
                         depth=None):
    """Strategy for drawing `FeatureTransformed` kernels.

  The underlying kernel is drawn from the `kernels` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_dim: Optional Python int giving the size of each feature dimension.
      If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound kernel.

  Returns:
    kernels: A strategy for drawing `FeatureTransformed` kernels with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    base_kernel, kernel_variable_names = draw(
        kernels(batch_shape=batch_shape,
                event_dim=event_dim,
                feature_dim=feature_dim,
                feature_ndims=feature_ndims,
                enable_vars=enable_vars,
                depth=depth - 1))

    hp.note('Forming FeatureTransformed kernel')

    result_kernel = tfpk.FeatureTransformed(
        kernel=base_kernel,
        transformation_fn=lambda x, feature_ndims, example_ndims: x**2.,
        validate_args=True)

    return result_kernel, kernel_variable_names
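# Hedged sketch (illustrative, not from the original module): forming the same
# kind of kernel directly.  The `transformation_fn` squares inputs before they
# reach the base kernel, so transformed.apply(x, y) == base.apply(x**2, y**2).
# The base-kernel choice below is arbitrary.
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfpk = tfp.math.psd_kernels

base = tfpk.ExponentiatedQuadratic(amplitude=1., length_scale=0.5)
transformed = tfpk.FeatureTransformed(
    kernel=base,
    transformation_fn=lambda x, feature_ndims, example_ndims: x**2.,
    validate_args=True)
# Equals base.apply([1.], [1.]) because (-1.)**2 == 1.
print(transformed.apply(tf.constant([1.]), tf.constant([-1.])))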
def base_distributions(draw,
                       dist_name=None,
                       batch_shape=None,
                       event_dim=None,
                       enable_vars=False,
                       eligibility_filter=lambda name: True):
  """Strategy for drawing arbitrary base Distributions.

  This does not draw compound distributions like `Independent`,
  `MixtureSameFamily`, or `TransformedDistribution`; only base Distributions
  that do not accept other Distributions as arguments.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    dist_name: Optional Python `str`.  If given, the produced distributions
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Distribution.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn at the top level.

  Returns:
    dists: A strategy for drawing Distributions with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
  if dist_name is None:
    names = [k for k in INSTANTIABLE_BASE_DISTS.keys() if eligibility_filter(k)]
    dist_name = draw(hps.sampled_from(sorted(names)))

  if dist_name == 'Empirical':
    variants = [k for k in INSTANTIABLE_BASE_DISTS.keys()
                if eligibility_filter(k) and 'Empirical' in k]
    dist_name = draw(hps.sampled_from(sorted(variants)))

  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())

  params_kwargs = draw(
      broadcasting_params(
          dist_name, batch_shape, event_dim=event_dim, enable_vars=enable_vars))
  params_constrained = constraint_for(dist_name)(params_kwargs)
  hp.note('Forming dist {} with constrained parameters {}'.format(
      dist_name, params_constrained))
  assert_shapes_unchanged(params_kwargs, params_constrained)
  params_constrained['validate_args'] = True
  dist_cls = INSTANTIABLE_BASE_DISTS[dist_name].cls
  result_dist = dist_cls(**params_constrained)
  if batch_shape != result_dist.batch_shape:
    msg = ('Distributions strategy generated a bad batch shape '
           'for {}, should have been {}.').format(result_dist, batch_shape)
    raise AssertionError(msg)
  return result_dist
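# Hedged usage sketch (illustrative, not from the original module): drawing one
# concrete base distribution with a fixed batch shape, assuming
# `base_distributions` is decorated with `@hps.composite`.  The distribution
# name and batch shape below are arbitrary.
import hypothesis as hp
from hypothesis import strategies as hps
import tensorflow.compat.v2 as tf


@hp.given(hps.data())
def test_base_distributions_sketch(data):
    dist = data.draw(
        base_distributions(dist_name='Normal', batch_shape=tf.TensorShape([2])))
    assert dist.batch_shape == tf.TensorShape([2])
    lp = dist.log_prob(tf.zeros([2]))  # one log-prob per batch member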
Example #13
def batch_reshapes(
    draw, batch_shape=None, event_dim=None,
    enable_vars=False, depth=None,
    eligibility_filter=lambda name: True, validate_args=True):
  """Strategy for drawing `BatchReshape` distributions.

  The underlying distribution is drawn from the `distributions` strategy.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `BatchReshape` distribution.  Note that the underlying distribution will
      in general have a different batch shape, to make the reshaping
      non-trivial.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.
    depth: Python `int` giving maximum nesting depth of compound Distributions.
    eligibility_filter: Optional Python callable.  Blacklists some Distribution
      class names so they will not be drawn.
    validate_args: Python `bool`; whether to enable runtime assertions.

  Returns:
    dists: A strategy for drawing `BatchReshape` distributions with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
  if depth is None:
    depth = draw(depths())

  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes(min_ndims=1, max_side=4))

  # TODO(b/142135119): Wanted to draw general input and output shapes like the
  # following, but Hypothesis complained about filtering out too many things.
  # underlying_batch_shape = draw(tfp_hps.shapes(min_ndims=1))
  # hp.assume(
  #   batch_shape.num_elements() == underlying_batch_shape.num_elements())
  underlying_batch_shape = [tf.TensorShape(batch_shape).num_elements()]

  underlying = draw(
      distributions(
          batch_shape=underlying_batch_shape,
          event_dim=event_dim,
          enable_vars=enable_vars,
          depth=depth - 1,
          eligibility_filter=eligibility_filter,
          validate_args=validate_args))
  hp.note('Forming BatchReshape with underlying dist {}; '
          'parameters {}; batch_shape {}'.format(
              underlying, params_used(underlying), batch_shape))
  result_dist = tfd.BatchReshape(
      underlying, batch_shape=batch_shape, validate_args=True)
  return result_dist
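# Hedged sketch (illustrative, not from the original module): the reshaping
# relationship the strategy relies on.  The underlying distribution's batch
# shape must contain the same number of elements as the requested
# `batch_shape`; here 6 batch members are reshaped to [2, 3].
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

underlying = tfd.Normal(loc=tf.zeros([6]), scale=1.)
reshaped = tfd.BatchReshape(underlying, batch_shape=[2, 3], validate_args=True)
print(reshaped.batch_shape)  # [2, 3]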
Example #14
def kernels(draw,
            kernel_name=None,
            batch_shape=None,
            event_dim=None,
            feature_ndims=None,
            enable_vars=False):
    """Strategy for drawing arbitrary Kernels.

  Args:
    draw: Hypothesis function supplied by `@hps.composite`.
    kernel_name: Optional Python `str`.  If given, the produced kernels
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Kernel.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      kernel's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    feature_ndims: Optional Python int stating the number of feature dimensions
      inputs will have. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
  Returns:
    kernels: A strategy for drawing Kernels with the specified `batch_shape`
      (or an arbitrary one if omitted).
    kernel_variable_names: List of kernel parameters that are variables.
  """

    if kernel_name is None:
        kernel_name = draw(hps.sampled_from(TF2_FRIENDLY_KERNELS))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=2, max_value=6))

    kernel_params = draw(
        broadcasting_params(kernel_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    kernel_variable_names = [
        k for k in kernel_params if tensor_util.is_ref(kernel_params[k])
    ]
    ctor = getattr(tfp.math.psd_kernels, kernel_name)
    result_kernel = ctor(validate_args=True,
                         feature_ndims=feature_ndims,
                         **kernel_params)

    if batch_shape != result_kernel.batch_shape:
        msg = ('Kernel strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_kernel,
                                                      batch_shape)
        raise AssertionError(msg)
    return result_kernel, kernel_variable_names
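# Hedged usage sketch (illustrative, not from the original module): consuming
# the `kernels` strategy above in a Hypothesis test, assuming it is decorated
# with `@hps.composite`.  Note it returns a (kernel, variable_names) pair, not
# a bare kernel.
import hypothesis as hp
from hypothesis import strategies as hps
import tensorflow.compat.v2 as tf


@hp.given(hps.data())
def test_kernels_sketch(data):
    kernel, variable_names = data.draw(
        kernels(batch_shape=tf.TensorShape([2]), enable_vars=False))
    assert kernel.batch_shape == tf.TensorShape([2])
    assert not variable_names  # no parameters are Variables when enable_vars=False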
Example #15
    def testKernelGradient(self, kernel_name, data):
        event_dim = data.draw(hps.integers(min_value=2, max_value=3))
        feature_ndims = data.draw(hps.integers(min_value=1, max_value=2))
        feature_dim = data.draw(hps.integers(min_value=2, max_value=4))
        batch_shape = data.draw(tfp_hps.shapes(max_ndims=2))

        kernel, kernel_parameter_variable_names = data.draw(
            kernel_hps.kernels(batch_shape=batch_shape,
                               kernel_name=kernel_name,
                               event_dim=event_dim,
                               feature_dim=feature_dim,
                               feature_ndims=feature_ndims,
                               enable_vars=True))

        # Check that variable parameters get passed to the kernel.variables
        kernel_variables_names = [
            v.name.strip('_0123456789:') for v in kernel.variables
        ]
        kernel_parameter_variable_names = [
            n.strip('_0123456789:') for n in kernel_parameter_variable_names
        ]
        self.assertEqual(set(kernel_parameter_variable_names),
                         set(kernel_variables_names))

        example_ndims = data.draw(hps.integers(min_value=1, max_value=2))
        input_batch_shape = data.draw(
            tfp_hps.broadcast_compatible_shape(kernel.batch_shape))
        xs = tf.identity(
            data.draw(
                kernel_hps.kernel_input(batch_shape=input_batch_shape,
                                        example_ndims=example_ndims,
                                        feature_dim=feature_dim,
                                        feature_ndims=feature_ndims)))

        # Check that we pick up all relevant kernel parameters.
        wrt_vars = [xs] + list(kernel.variables)
        self.evaluate([v.initializer for v in kernel.variables])

        max_permissible = 2 + EXTRA_TENSOR_CONVERSION_KERNELS.get(
            kernel_name, 0)

        with tf.GradientTape() as tape:
            with tfp_hps.assert_no_excessive_var_usage(
                    'method `apply` of {}'.format(kernel),
                    max_permissible=max_permissible):
                tape.watch(wrt_vars)
                with tfp_hps.no_tf_rank_errors():
                    diag = kernel.apply(xs, xs, example_ndims=example_ndims)
        grads = tape.gradient(diag, wrt_vars)
        assert_no_none_grad(kernel, 'apply', wrt_vars, grads)

        # Check that copying the kernel works.
        with tfp_hps.no_tf_rank_errors():
            diag2 = self.evaluate(kernel.copy().apply(
                xs, xs, example_ndims=example_ndims))
        self.assertAllClose(diag, diag2)
Example #16
def stochastic_processes(draw,
                         process_name=None,
                         kernel_name=None,
                         batch_shape=None,
                         event_dim=None,
                         feature_dim=None,
                         feature_ndims=None,
                         enable_vars=False):
    if process_name is None:
        process_name = draw(
            hps.sampled_from(sorted(PARAM_EVENT_NDIMS_BY_PROCESS_NAME.keys())))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=4))
    if feature_dim is None:
        feature_dim = draw(hps.integers(min_value=2, max_value=4))
    if feature_ndims is None:
        feature_ndims = draw(hps.integers(min_value=1, max_value=3))

    if process_name == 'GaussianProcess':
        return draw(
            gaussian_processes(kernel_name=kernel_name,
                               batch_shape=batch_shape,
                               event_dim=event_dim,
                               feature_dim=feature_dim,
                               feature_ndims=feature_ndims,
                               enable_vars=enable_vars))
    elif process_name == 'GaussianProcessRegressionModel':
        return draw(
            gaussian_process_regression_models(kernel_name=kernel_name,
                                               batch_shape=batch_shape,
                                               event_dim=event_dim,
                                               feature_dim=feature_dim,
                                               feature_ndims=feature_ndims,
                                               enable_vars=enable_vars))
    elif process_name == 'VariationalGaussianProcess':
        return draw(
            variational_gaussian_processes(kernel_name=kernel_name,
                                           batch_shape=batch_shape,
                                           event_dim=event_dim,
                                           feature_dim=feature_dim,
                                           feature_ndims=feature_ndims,
                                           enable_vars=enable_vars))
    elif process_name == 'StudentTProcess':
        return draw(
            student_t_processes(kernel_name=kernel_name,
                                batch_shape=batch_shape,
                                event_dim=event_dim,
                                feature_dim=feature_dim,
                                feature_ndims=feature_ndims,
                                enable_vars=enable_vars))
    raise ValueError('Stochastic process "{}" not found.'.format(process_name))
Example #17
def bijectors(draw,
              bijector_name=None,
              batch_shape=None,
              event_dim=None,
              enable_vars=False):
    """Strategy for drawing Bijectors.

  The emitted bijector may be a basic bijector or an `Invert` of a basic
  bijector, but not a compound like `Chain`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector_name: Optional Python `str`.  If given, the produced bijectors
      will all have this type.  If omitted, Hypothesis chooses one from
      the whitelist `TF2_FRIENDLY_BIJECTORS`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      bijector.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.

  Returns:
    bijectors: A strategy for drawing bijectors with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if bijector_name is None:
        bijector_name = draw(hps.one_of(map(hps.just, TF2_FRIENDLY_BIJECTORS)))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if bijector_name == 'Invert':
        underlying_name = draw(
            hps.one_of(map(hps.just,
                           set(TF2_FRIENDLY_BIJECTORS) - {'Invert'})))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars))
        return tfb.Invert(underlying, validate_args=True)

    bijector_params = draw(
        broadcasting_params(bijector_name,
                            batch_shape,
                            event_dim=event_dim,
                            enable_vars=enable_vars))
    ctor = getattr(tfb, bijector_name)
    return ctor(validate_args=True, **bijector_params)
Example #18
  def test_log_prob(self, data):
    batch_shape = data.draw(tfp_hps.shapes())
    bcast_arg, dist_batch_shp = data.draw(
        tfp_hps.broadcasting_shapes(batch_shape, 2))

    underlying = tfd.Normal(
        loc=tf.reshape(
            tf.range(float(np.prod(tensorshape_util.as_list(dist_batch_shp)))),
            dist_batch_shp),
        scale=0.01)

    if not self.is_static_shape:
      bcast_arg = tf.Variable(bcast_arg)
      self.evaluate(bcast_arg.initializer)
    dist = tfd.BatchBroadcast(underlying, bcast_arg)
    sample_shape = data.draw(hps.one_of(hps.integers(0, 13), tfp_hps.shapes()))
    sample_batch_event = tf.concat([np.int32(sample_shape).reshape([-1]),
                                    batch_shape,
                                    dist.event_shape_tensor()],
                                   axis=0)

    obsv = tf.broadcast_to(dist.distribution.loc, sample_batch_event)
    self.assertAllTrue(dist.log_prob(obsv) > dist.log_prob(obsv + .5))
Example #19
 def test_shapes(self, data):
   batch_shape = data.draw(tfp_hps.shapes())
   bcast_arg, dist_batch_shp = data.draw(
       tfp_hps.broadcasting_shapes(batch_shape, 2))
   underlying = data.draw(tfd_hps.distributions(batch_shape=dist_batch_shp))
   if not self.is_static_shape:
     bcast_arg = tf.Variable(bcast_arg)
     self.evaluate(bcast_arg.initializer)
   dist = tfd.BatchBroadcast(underlying, bcast_arg)
   if self.is_static_shape:
     self.assertEqual(batch_shape, dist.batch_shape)
     self.assertEqual(underlying.event_shape, dist.event_shape)
   self.assertAllEqual(batch_shape, dist.batch_shape_tensor())
   self.assertAllEqual(underlying.event_shape_tensor(),
                       dist.event_shape_tensor())
Example #20
def base_distribution_unconstrained_params(draw,
                                           dist_name,
                                           batch_shape=None,
                                           event_dim=None,
                                           enable_vars=False,
                                           params=None):
  """Strategy for drawing unconstrained parameters of a base Distribution.

  This does not draw parameters for compound distributions like `Independent`,
  `MixtureSameFamily`, or `TransformedDistribution`; only base Distributions
  that do not accept other Distributions as arguments.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    dist_name: Optional Python `str`.  If given, the produced distributions
      will all have this type.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      Distribution.  Hypothesis will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.
    params: An optional set of Distribution parameters. If params are not
      provided, Hypothesis will choose a set of parameters.

  Returns:
    dists: A strategy for drawing Distribution parameters with the specified
    `batch_shape` (or an arbitrary one if omitted).
  """
  if params is not None:
    assert batch_shape is not None, ('Need to pass in valid `batch_shape` when'
                                     ' passing in `params`.')
    return params, batch_shape
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())

  # Draw raw parameters
  params_kwargs = draw(
      broadcasting_params(
          dist_name, batch_shape, event_dim=event_dim, enable_vars=enable_vars))
  hp.note('Forming dist {} with raw parameters {}'.format(
      dist_name, params_kwargs))

  return params_kwargs, batch_shape
def generalized_paretos(draw, batch_shape=None):
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())

  constraints = dict(
      loc=tfp_hps.identity_fn,
      scale=tfp_hps.softplus_plus_eps(),
      concentration=lambda x: tf.math.tanh(x) * 0.24)  # <.25==safe for variance

  params = draw(
      tfp_hps.broadcasting_params(
          batch_shape,
          params_event_ndims=dict(loc=0, scale=0, concentration=0),
          constraint_fn_for=constraints.get))
  dist = tfd.GeneralizedPareto(validate_args=draw(hps.booleans()), **params)
  if dist.batch_shape != batch_shape:
    raise AssertionError('batch_shape mismatch: expect {} but got {}'.format(
        batch_shape, dist))
  return dist
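# Hedged sketch (illustrative, not from the original module): the distribution
# this strategy builds.  The concentration constraint above, tanh(x) * 0.24,
# keeps |concentration| < 0.25, which in particular keeps the variance finite
# (GeneralizedPareto's variance requires concentration < 1/2).
import tensorflow_probability as tfp
tfd = tfp.distributions

gp = tfd.GeneralizedPareto(loc=0., scale=1., concentration=0.2,
                           validate_args=True)
print(gp.mean(), gp.variance())  # both finite because concentration < 1/2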
 def testTheoreticalFldj(self, data):
   if not tf.executing_eagerly():
     msg = ('Testing eager mode only because graph is very slow, '
            'presumably due to costly graph construction.')
     self.skipTest(msg)
   if JAX_MODE:  # TODO(b/160167257): Eliminate this workaround.
     # get_fldj_theoretical uses tfp.math.batch_jacobian and assumes the
     # behavior of the bijector does not vary by position. In this case, it
     # can, so we must vmap the result.
     batch_shape = [1]
   else:
     # get_fldj_theoretical test rig requires 1-d batches.
     batch_shape = data.draw(tfp_hps.shapes(min_ndims=1, max_ndims=1))
   hp.note('batch shape: {}'.format(batch_shape))
   bijector = data.draw(rq_splines(batch_shape=batch_shape, dtype=tf.float64))
   self.assertEqual(tf.float64, bijector.dtype)
   bw, bh, kd = self.evaluate(
       [bijector.bin_widths, bijector.bin_heights, bijector.knot_slopes])
   hp.note('bw: {!r}\nbh: {!r}\nkd: {!r}'.format(bw, bh, kd))
   x_shp = ((bw + bh)[..., :-1] + kd).shape[:-1]
   if x_shp[-1] == 1:  # Possibly broadcast the x dim.
     dim = data.draw(hps.integers(min_value=1, max_value=7))
     x_shp = x_shp[:-1] + (dim,)
   x = np.linspace(-4.9, 4.9, np.prod(x_shp), dtype=np.float64).reshape(*x_shp)
   hp.note('x: {!r}'.format(x))
   y = self.evaluate(bijector.forward(x))
   hp.note('y: {!r}'.format(y))
   bijector_test_util.assert_bijective_and_finite(
       bijector,
       x,
       y,
       eval_func=self.evaluate,
       event_ndims=0,
       inverse_event_ndims=0,
       rtol=1e-5)
   fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
   fldj_theoretical = bijector_test_util.get_fldj_theoretical(
       bijector, x, event_ndims=0)
   self.assertAllClose(
       self.evaluate(fldj_theoretical),
       self.evaluate(fldj),
       atol=1e-5,
       rtol=1e-5)
Example #23
 def test_default_bijector(self, data):
     batch_shape = data.draw(tfp_hps.shapes())
     bcast_arg, dist_batch_shp = data.draw(
         tfp_hps.broadcasting_shapes(batch_shape, 2))
     underlying = data.draw(
         tfd_hps.distributions(batch_shape=dist_batch_shp))
     if not self.is_static_shape:
         bcast_arg = tf.Variable(bcast_arg)
         self.evaluate(bcast_arg.initializer)
     dist = tfd.BatchBroadcast(underlying, bcast_arg)
     bijector = dist.experimental_default_event_space_bijector()
     hp.assume(bijector is not None)
     shp = bijector.inverse_event_shape_tensor(
         tf.concat([dist.batch_shape_tensor(),
                    dist.event_shape_tensor()],
                   axis=0))
     obs = bijector.forward(
         tf.random.normal(shp, seed=test_util.test_seed()))
     with tf.control_dependencies(dist._sample_control_dependencies(obs)):
         self.evaluate(tf.identity(obs))
    def testDistribution(self, dist_name, data):
        dist = data.draw(
            dhps.base_distributions(
                dist_name=dist_name,
                enable_vars=False,
                # Unregularized MLEs can be numerically problematic, e.g., empirical
                # (co)variances can be singular. To avoid such numerical issues, we
                # sanity-check the MLE only for a fixed sample with assumed-sane
                # parameter values (zeros constrained to the parameter support).
                param_strategy_fn=_constrained_zeros_fn,
                batch_shape=data.draw(
                    tfp_hps.shapes(min_ndims=0, max_ndims=2, max_side=5))))
        x, lp = self.evaluate(
            dist.experimental_sample_and_log_prob(
                10, seed=test_util.test_seed(sampler_type='stateless')))

        try:
            parameters = self.evaluate(
                type(dist)._maximum_likelihood_parameters(x))
        except NotImplementedError:
            self.skipTest('Fitting not implemented.')

        flat_params = tf.nest.flatten(parameters)
        lp_fn = lambda *flat_params: type(dist)(  # pylint: disable=g-long-lambda
            validate_args=True,
            **tf.nest.pack_sequence_as(parameters, flat_params)).log_prob(x)
        lp_mle, grads = self.evaluate(
            tfp_math.value_and_gradient(lp_fn, flat_params))

        # Likelihood of MLE params should be higher than of the original params.
        self.assertAllGreaterEqual(
            tf.reduce_sum(lp_mle, axis=0) - tf.reduce_sum(lp, axis=0), -1e-4)

        if dist_name not in MLE_AT_CONSTRAINT_BOUNDARY:
            # MLE parameters should be a critical point of the log prob.
            for g in grads:
                if np.any(np.isnan(g)):
                    # Skip parameters with undefined or unstable gradients (e.g.,
                    # Categorical `num_classes`).
                    continue
                self.assertAllClose(tf.zeros_like(g), g, atol=1e-2)
Example #25
    def testDistribution(self, data):
        enable_vars = data.draw(hps.booleans())

        # TODO(b/146572907): Fix `enable_vars` for metadistributions.
        # Copy so that extending below does not mutate the module-level list.
        broken_dists = list(EVENT_SPACE_BIJECTOR_IS_BROKEN)
        if enable_vars:
            broken_dists.extend(dhps.INSTANTIABLE_META_DISTS)

        dist = data.draw(
            dhps.distributions(
                enable_vars=enable_vars,
                eligibility_filter=(lambda name: name not in broken_dists)))
        self.evaluate([var.initializer for var in dist.variables])
        self.check_bad_loc_scale(dist)

        event_space_bijector = dist._experimental_default_event_space_bijector(
        )
        if event_space_bijector is None:
            return

        total_sample_shape = tensorshape_util.concatenate(
            # Draw a sample shape
            data.draw(tfp_hps.shapes()),
            # Draw a shape that broadcasts with `[batch_shape, inverse_event_shape]`
            # where `inverse_event_shape` is the event shape in the bijector's
            # domain. This is the shape of `y` in R**n, such that
            # x = event_space_bijector(y) has the event shape of the distribution.
            data.draw(
                tfp_hps.broadcasting_shapes(tensorshape_util.concatenate(
                    dist.batch_shape,
                    event_space_bijector.inverse_event_shape(
                        dist.event_shape)),
                                            n=1))[0])

        y = data.draw(
            tfp_hps.constrained_tensors(tfp_hps.identity_fn,
                                        total_sample_shape.as_list()))
        x = event_space_bijector(y)
        with tf.control_dependencies(dist._sample_control_dependencies(x)):
            self.evaluate(tf.identity(x))
    def check_event_space_bijector_constrains(self, dist, data):
        event_space_bijector = dist.experimental_default_event_space_bijector()
        if event_space_bijector is None:
            return

        # Draw a sample shape
        sample_shape = data.draw(tfp_hps.shapes())
        inv_event_shape = event_space_bijector.inverse_event_shape(
            tensorshape_util.concatenate(dist.batch_shape, dist.event_shape))

        # Draw a shape that broadcasts with `[batch_shape, inverse_event_shape]`
        # where `inverse_event_shape` is the event shape in the bijector's
        # domain. This is the shape of `y` in R**n, such that
        # x = event_space_bijector(y) has the event shape of the distribution.

        # TODO(b/174778703): Actually draw broadcast compatible shapes.
        batch_inv_event_compat_shape = inv_event_shape
        # batch_inv_event_compat_shape = data.draw(
        #     tfp_hps.broadcast_compatible_shape(inv_event_shape))
        # batch_inv_event_compat_shape = tensorshape_util.concatenate(
        #     (1,) * (len(inv_event_shape) - len(batch_inv_event_compat_shape)),
        #     batch_inv_event_compat_shape)

        total_sample_shape = tensorshape_util.concatenate(
            sample_shape, batch_inv_event_compat_shape)
        # full_sample_batch_event_shape = tensorshape_util.concatenate(
        #     sample_shape, inv_event_shape)

        y = data.draw(
            tfp_hps.constrained_tensors(tfp_hps.identity_fn,
                                        total_sample_shape.as_list()))
        hp.note('Trying to constrain inputs {}'.format(y))
        with tfp_hps.no_tf_rank_errors():
            x = event_space_bijector(y)
            hp.note('Got constrained samples {}'.format(x))
            with tf.control_dependencies(dist._sample_control_dependencies(x)):
                self.evaluate(tensor_util.identity_as_tensor(x))
Example #27
def mixtures_same_family(draw,
                         batch_shape=None,
                         event_dim=None,
                         enable_vars=False,
                         depth=None):
    """Strategy for drawing `MixtureSameFamily` distributions.

  The component distribution is drawn from the `distributions` strategy.

  The Categorical mixture distributions are either shared across all batch
  members, or drawn independently for the full batch (as required by
  `MixtureSameFamily`).

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `MixtureSameFamily` distribution.  The component distribution will have a
      batch shape of 1 rank higher (for the components being mixed).  Hypothesis
      will pick a batch shape if omitted.
    event_dim: Optional Python int giving the size of each of the component
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `MixtureSameFamily` distributions with the
      specified `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    if batch_shape is None:
        # Ensure the components dist has at least one batch dim (a component dim).
        batch_shape = draw(tfp_hps.shapes(min_ndims=1, min_lastdimsize=2))
    else:  # This mixture adds a batch dim to its underlying components dist.
        batch_shape = tensorshape_util.concatenate(
            batch_shape,
            draw(tfp_hps.shapes(min_ndims=1, max_ndims=1, min_lastdimsize=2)))

    component = draw(
        distributions(batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      depth=depth - 1))
    hp.note(
        'Drawing MixtureSameFamily with component {}; parameters {}'.format(
            component, params_used(component)))
    # scalar or same-shaped categorical?
    mixture_batch_shape = draw(
        hps.one_of(hps.just(batch_shape[:-1]), hps.just(tf.TensorShape([]))))
    mixture_dist = draw(
        base_distributions(dist_name='Categorical',
                           batch_shape=mixture_batch_shape,
                           event_dim=tensorshape_util.as_list(batch_shape)[-1],
                           enable_vars=enable_vars))
    hp.note(('Forming MixtureSameFamily with '
             'mixture distribution {}; parameters {}').format(
                 mixture_dist, params_used(mixture_dist)))
    result_dist = tfd.MixtureSameFamily(components_distribution=component,
                                        mixture_distribution=mixture_dist,
                                        validate_args=True)
    if batch_shape[:-1] != result_dist.batch_shape:
        msg = ('MixtureSameFamily strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist,
                                                      batch_shape[:-1])
        raise AssertionError(msg)
    return result_dist
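# Hedged sketch (illustrative, not from the original module): the shape
# contract the strategy checks.  The component distribution carries one extra
# trailing batch dimension (the mixture components), which `MixtureSameFamily`
# mixes out of the resulting batch shape.
import tensorflow_probability as tfp
tfd = tfp.distributions

mix = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
    components_distribution=tfd.Normal(loc=[-1., 1.], scale=[0.1, 0.5]),
    validate_args=True)
print(mix.batch_shape)  # [] -- the trailing component dim of size 2 is mixed out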
Example #28
def bijectors(draw,
              bijector_name=None,
              batch_shape=None,
              event_dim=None,
              enable_vars=False,
              allowed_bijectors=None,
              validate_args=True,
              return_duplicate=False):
    """Strategy for drawing Bijectors.

  The emitted bijector may be a basic bijector or an `Invert` of a basic
  bijector, but not a compound like `Chain`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector_name: Optional Python `str`.  If given, the produced bijectors
      will all have this type.  If omitted, Hypothesis chooses one from
      the allowlist `INSTANTIABLE_BIJECTORS`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      bijector.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
      `tfp.util.TransformedVariable`}.
    allowed_bijectors: Optional list of `str` Bijector names to sample from.
      Bijectors not in this list will not be returned or instantiated as
      part of a meta-bijector (Chain, Invert, etc.). Defaults to
      `INSTANTIABLE_BIJECTORS`.
    validate_args: Python `bool`; whether to enable runtime checks.
    return_duplicate: Python `bool`; if `False`, return a single bijector. If
      `True`, return a tuple of two bijectors of the same type, instantiated
      with the same parameters.

  Returns:
    bijectors: A strategy for drawing bijectors with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
    if allowed_bijectors is None:
        allowed_bijectors = bhps.INSTANTIABLE_BIJECTORS
    if bijector_name is None:
        bijector_name = draw(hps.sampled_from(allowed_bijectors))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))
    if bijector_name == 'Invert':
        underlying_name = draw(
            hps.sampled_from(sorted(set(allowed_bijectors) - {'Invert'})))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=batch_shape,
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      allowed_bijectors=allowed_bijectors,
                      validate_args=validate_args))
        bijector_params = {'bijector': underlying}
        msg = 'Forming Invert bijector with underlying bijector {}.'
        hp.note(msg.format(underlying))
    elif bijector_name == 'TransformDiagonal':
        underlying_name = draw(
            hps.sampled_from(
                sorted(
                    set(allowed_bijectors)
                    & set(bhps.TRANSFORM_DIAGONAL_ALLOWLIST))))
        underlying = draw(
            bijectors(bijector_name=underlying_name,
                      batch_shape=(),
                      event_dim=event_dim,
                      enable_vars=enable_vars,
                      allowed_bijectors=allowed_bijectors,
                      validate_args=validate_args))
        bijector_params = {'diag_bijector': underlying}
        msg = 'Forming TransformDiagonal bijector with underlying bijector {}.'
        hp.note(msg.format(underlying))
    elif bijector_name == 'Inline':
        scale = draw(
            tfp_hps.maybe_variable(
                hps.sampled_from(np.float32([1., -1., 2, -2.])), enable_vars))
        b = tfb.Scale(scale=scale)

        bijector_params = dict(
            forward_fn=CallableModule(b.forward, b),
            inverse_fn=b.inverse,
            forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian(  # pylint: disable=g-long-lambda
                x,
                event_ndims=b.forward_min_event_ndims),
            forward_min_event_ndims=b.forward_min_event_ndims,
            is_constant_jacobian=b.is_constant_jacobian,
            is_increasing=b._internal_is_increasing,  # pylint: disable=protected-access
        )
    elif bijector_name == 'DiscreteCosineTransform':
        dct_type = hps.integers(min_value=2, max_value=3)
        bijector_params = {'dct_type': draw(dct_type)}
    elif bijector_name == 'GeneralizedPareto':
        concentration = hps.floats(min_value=-200., max_value=200)
        scale = hps.floats(min_value=1e-2, max_value=200)
        loc = hps.floats(min_value=-200, max_value=200)
        bijector_params = {
            'concentration': draw(concentration),
            'scale': draw(scale),
            'loc': draw(loc)
        }
    elif bijector_name == 'PowerTransform':
        power = hps.floats(min_value=1e-6, max_value=10.)
        bijector_params = {'power': draw(power)}
    elif bijector_name == 'Permute':
        event_ndims = draw(hps.integers(min_value=1, max_value=2))
        axis = hps.integers(min_value=-event_ndims, max_value=-1)
        # This is a permutation of dimensions within an axis.
        # (Contrast with `Transpose` below.)
        bijector_params = {
            'axis': draw(axis),
            'permutation': draw(tfp_hps.maybe_variable(
                hps.permutations(np.arange(event_dim)), enable_vars,
                dtype=tf.int32))
        }
    elif bijector_name == 'Reshape':
        event_shape_out = draw(tfp_hps.shapes(min_ndims=1))
        # TODO(b/142135119): Wanted to draw general input and output shapes like the
        # following, but Hypothesis complained about filtering out too many things.
        # event_shape_in = draw(tfp_hps.shapes(min_ndims=1))
        # hp.assume(event_shape_out.num_elements() == event_shape_in.num_elements())
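        # For now, the input shape is a flat vector with the same total number
        # of elements as the drawn output shape.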
        event_shape_in = [event_shape_out.num_elements()]
        bijector_params = {
            'event_shape_out': event_shape_out,
            'event_shape_in': event_shape_in
        }
    elif bijector_name == 'Transpose':
        event_ndims = draw(hps.integers(min_value=0, max_value=2))
        # This is a permutation of axes.
        # (Contrast with `Permute` above.)
        bijector_params = {
            'perm': draw(hps.permutations(np.arange(event_ndims)))
        }
    else:
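        # Default case: draw per-parameter Tensors that broadcast to
        # `batch_shape`, then apply the bijector-specific constraint functions.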
        params_event_ndims = bhps.INSTANTIABLE_BIJECTORS[
            bijector_name].params_event_ndims
        bijector_params = draw(
            tfp_hps.broadcasting_params(
                batch_shape,
                params_event_ndims,
                event_dim=event_dim,
                enable_vars=enable_vars,
                constraint_fn_for=lambda param: constraint_for(
                    bijector_name, param),  # pylint:disable=line-too-long
                mutex_params=MUTEX_PARAMS))
        bijector_params = constrain_params(bijector_params, bijector_name)

    ctor = getattr(tfb, bijector_name)
    hp.note('Forming {} bijector with params {}.'.format(
        bijector_name, bijector_params))
    bijector = ctor(validate_args=validate_args, **bijector_params)
    if not return_duplicate:
        return bijector
    return (bijector, ctor(validate_args=validate_args, **bijector_params))
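# Illustrative usage sketch, not part of the original module: exercise the
# `return_duplicate=True` path of the strategy above.  It uses only names
# already imported in this file (`hp`, `hps`) and assumes `bijectors` is
# wrapped with `@hps.composite`, as it is in the TFP source; the function name
# below is hypothetical.
@hp.given(hps.data())
@hp.settings(max_examples=5, deadline=None)
def _example_draw_duplicate_bijectors(data):
    bij_a, bij_b = data.draw(bijectors(enable_vars=False,
                                       validate_args=True,
                                       return_duplicate=True))
    # Both bijectors come from the same constructor called twice with identical
    # parameters, so at minimum their types must agree.
    assert type(bij_a) is type(bij_b)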
def transformed_distributions(draw,
                              batch_shape=None,
                              event_dim=None,
                              enable_vars=False,
                              depth=None):
    """Strategy for drawing `TransformedDistribution`s.

  The transforming bijector is drawn from the
  `bijectors.hypothesis_testlib.unconstrained_bijectors` strategy.

  The underlying distribution is drawn from the `distributions` strategy, except
  that it must be compatible with the bijector according to
  `bijectors.hypothesis_testlib.distribution_filter_for` (this filter generally
  checks that vector bijectors are not combined with scalar distributions,
  etc.).

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      `TransformedDistribution`.  The underlying distribution will sometimes
      have the same `batch_shape`, and sometimes have scalar batch shape.
      Hypothesis will pick a `batch_shape` if omitted.
    event_dim: Optional Python int giving the size of each of the underlying
      distribution's parameters' event dimensions.  This is shared across all
      parameters, permitting square event matrices, compatible location and
      scale Tensors, etc. If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.
    depth: Python `int` giving maximum nesting depth of compound Distributions.

  Returns:
    dists: A strategy for drawing `TransformedDistribution`s with the specified
      `batch_shape` (or an arbitrary one if omitted).
  """
    if depth is None:
        depth = draw(depths())

    bijector = draw(bijector_hps.unconstrained_bijectors())
    hp.note(
        'Drawing TransformedDistribution with bijector {}'.format(bijector))
    if batch_shape is None:
        batch_shape = draw(tfp_hps.shapes())
    underlying_batch_shape = batch_shape
    batch_shape_arg = None
    if draw(hps.booleans()):
        # Use batch_shape overrides.
        underlying_batch_shape = tf.TensorShape([])  # scalar underlying batch
        batch_shape_arg = batch_shape
    underlyings = distributions(
        batch_shape=underlying_batch_shape,
        event_dim=event_dim,
        enable_vars=enable_vars,
        depth=depth - 1).filter(bijector_hps.distribution_filter_for(bijector))
    to_transform = draw(underlyings)
    hp.note('Forming TransformedDistribution with '
            'underlying distribution {}; parameters {}'.format(
                to_transform, params_used(to_transform)))
    # TODO(bjp): Add test coverage for `event_shape` argument of
    # `TransformedDistribution`.
    result_dist = tfd.TransformedDistribution(bijector=bijector,
                                              distribution=to_transform,
                                              batch_shape=batch_shape_arg,
                                              validate_args=True)
    if batch_shape != result_dist.batch_shape:
        msg = ('TransformedDistribution strategy generated a bad batch shape '
               'for {}, should have been {}.').format(result_dist, batch_shape)
        raise AssertionError(msg)
    return result_dist
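# Illustrative usage sketch, not part of the original module: draw a
# `TransformedDistribution` with a requested batch shape and confirm the shape
# was honored.  Uses only names already imported here (`hp`, `hps`, `tfp_hps`)
# and assumes `transformed_distributions` is wrapped with `@hps.composite`, as
# in the TFP source; the function name below is hypothetical.
@hp.given(hps.data())
@hp.settings(max_examples=5, deadline=None)
def _example_transformed_distribution_batch_shape(data):
    batch_shape = data.draw(tfp_hps.shapes())
    dist = data.draw(transformed_distributions(batch_shape=batch_shape))
    # The strategy itself raises if the batch shape is wrong, so this check is
    # illustrative only.
    assert dist.batch_shape == batch_shape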
def bijectors(draw, bijector_name=None, batch_shape=None, event_dim=None,
              enable_vars=False):
  """Strategy for drawing Bijectors.

  The emitted bijector may be a basic bijector or an `Invert` of a basic
  bijector, but not a compound like `Chain`.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    bijector_name: Optional Python `str`.  If given, the produced bijectors
      will all have this type.  If omitted, Hypothesis chooses one from
      the whitelist `TF2_FRIENDLY_BIJECTORS`.
    batch_shape: An optional `TensorShape`.  The batch shape of the resulting
      bijector.  Hypothesis will pick one if omitted.
    event_dim: Optional Python int giving the size of each of the bijector's
      parameters' event dimensions.  This is shared across all parameters,
      permitting square event matrices, compatible location and scale Tensors,
      etc.  If omitted, Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test.  If `False`, the returned parameters are
      all Tensors, never Variables or DeferredTensor.

  Returns:
    bijectors: A strategy for drawing bijectors with the specified `batch_shape`
      (or an arbitrary one if omitted).
  """
  if bijector_name is None:
    bijector_name = draw(hps.sampled_from(TF2_FRIENDLY_BIJECTORS))
  if batch_shape is None:
    batch_shape = draw(tfp_hps.shapes())
  if event_dim is None:
    event_dim = draw(hps.integers(min_value=2, max_value=6))
  if bijector_name == 'Invert':
    underlying_name = draw(
        hps.sampled_from(sorted(set(TF2_FRIENDLY_BIJECTORS) - {'Invert'})))
    underlying = draw(
        bijectors(
            bijector_name=underlying_name,
            batch_shape=batch_shape,
            event_dim=event_dim,
            enable_vars=enable_vars))
    return tfb.Invert(underlying, validate_args=True)
  if bijector_name == 'Inline':
    if enable_vars:
      scale = tf.Variable(1., name='scale')
    else:
      scale = 2.
    b = tfb.AffineScalar(scale=scale)

    inline = tfb.Inline(
        forward_fn=b.forward,
        inverse_fn=b.inverse,
        forward_log_det_jacobian_fn=lambda x: b.forward_log_det_jacobian(  # pylint: disable=g-long-lambda
            x, event_ndims=b.forward_min_event_ndims),
        forward_min_event_ndims=b.forward_min_event_ndims,
        is_constant_jacobian=b.is_constant_jacobian,
    )
    inline.b = b
    return inline
  if bijector_name == 'DiscreteCosineTransform':
    dct_type = draw(hps.integers(min_value=2, max_value=3))
    return tfb.DiscreteCosineTransform(
        validate_args=True, dct_type=dct_type)
  if bijector_name == 'PowerTransform':
    power = draw(hps.floats(min_value=0., max_value=10.))
    return tfb.PowerTransform(validate_args=True, power=power)

  bijector_params = draw(
      broadcasting_params(bijector_name, batch_shape, event_dim=event_dim,
                          enable_vars=enable_vars))
  ctor = getattr(tfb, bijector_name)
  return ctor(validate_args=True, **bijector_params)
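# Illustrative usage sketch for the legacy strategy above, not part of the
# original snippet: pin `bijector_name` to 'Sigmoid' (assumed to be listed in
# `TF2_FRIENDLY_BIJECTORS`) so the round trip below is well defined, and check
# that inverse(forward(x)) recovers x.  Assumes TF2 eager execution and the
# `tf`/`np` modules already used in this file; the function name is
# hypothetical.
@hp.given(hps.data())
@hp.settings(max_examples=5, deadline=None)
def _example_legacy_sigmoid_roundtrip(data):
  bijector = data.draw(bijectors(bijector_name='Sigmoid'))
  x = tf.constant([-1.0, 0.0, 1.0])
  x_back = bijector.inverse(bijector.forward(x))
  np.testing.assert_allclose(x, x_back, rtol=1e-4, atol=1e-4)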