Example #1
    def testUniformSampleWithShape(self):
        with self.test_session():
            a = 10.0
            b = [11.0, 20.0]
            uniform = uniform_lib.Uniform(a, b)

            pdf = uniform.prob(uniform.sample((2, 3)))
            # pylint: disable=bad-continuation
            expected_pdf = [
                [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
                [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
            ]
            # pylint: enable=bad-continuation
            self.assertAllClose(expected_pdf, self.evaluate(pdf))

            pdf = uniform.prob(uniform.sample())
            expected_pdf = [1.0, 0.1]
            self.assertAllClose(expected_pdf, self.evaluate(pdf))
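The expected densities above follow directly from the broadcast bounds: the first component is Uniform(10, 11) with density 1 / (11 - 10) = 1.0, the second is Uniform(10, 20) with density 1 / (20 - 10) = 0.1, regardless of the sample shape. A minimal NumPy sketch of the same arithmetic (the names here are illustrative, not part of the test):

import numpy as np

low = 10.0
high = np.array([11.0, 20.0])
# A uniform density is 1 / (high - low) everywhere inside the support.
expected_pdf = 1.0 / (high - low)  # -> [1.0, 0.1]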
Example #2
    def __init__(self,
                 concentration1=None,
                 concentration0=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Kumaraswamy"):
        """Initialize a batch of Kumaraswamy distributions.

        Args:
          concentration1: Positive floating-point `Tensor` indicating mean
            number of successes; aka "alpha". Implies `self.dtype` and
            `self.batch_shape`, i.e.,
            `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
          concentration0: Positive floating-point `Tensor` indicating mean
            number of failures; aka "beta". Otherwise has same semantics as
            `concentration1`.
          validate_args: Python `bool`, default `False`. When `True`,
            distribution parameters are checked for validity despite possibly
            degrading runtime performance. When `False`, invalid inputs may
            silently render incorrect outputs.
          allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
            (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
            result is undefined. When `False`, an exception is raised if one or
            more of the statistic's batch members are undefined.
          name: Python `str` name prefixed to Ops created by this class.
        """
        with ops.name_scope(name, values=[concentration1,
                                          concentration0]) as name:
            concentration1 = ops.convert_to_tensor(concentration1,
                                                   name="concentration1")
            concentration0 = ops.convert_to_tensor(concentration0,
                                                   name="concentration0")
        super(Kumaraswamy, self).__init__(
            distribution=uniform.Uniform(
                low=array_ops.zeros([], dtype=concentration1.dtype),
                high=array_ops.ones([], dtype=concentration1.dtype),
                allow_nan_stats=allow_nan_stats),
            bijector=bijectors.Kumaraswamy(concentration1=concentration1,
                                           concentration0=concentration0,
                                           validate_args=validate_args),
            batch_shape=distribution_util.get_broadcast_shape(
                concentration1, concentration0),
            name=name)
        self._reparameterization_type = distribution.FULLY_REPARAMETERIZED
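The constructor above realizes Kumaraswamy as a transformed distribution: the base is Uniform(0, 1) and the bijector's forward map plays the role of the Kumaraswamy quantile function, so pushing uniform samples through it yields Kumaraswamy samples. A rough NumPy sketch of that sampling scheme, assuming the standard quantile x = (1 - (1 - u)**(1 / concentration0))**(1 / concentration1); the helper name is hypothetical:

import numpy as np

def sample_kumaraswamy(concentration1, concentration0, size, rng=np.random):
    # Inverse-CDF sampling: draw u ~ Uniform(0, 1), then apply the quantile.
    u = rng.uniform(0.0, 1.0, size=size)
    return (1.0 - (1.0 - u) ** (1.0 / concentration0)) ** (1.0 / concentration1)

samples = sample_kumaraswamy(concentration1=2.0, concentration0=3.0, size=1000)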
Example #3
    def testUniformCDF(self):
        batch_size = 6
        a = constant_op.constant([1.0] * batch_size)
        b = constant_op.constant([11.0] * batch_size)
        a_v = 1.0
        b_v = 11.0
        x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

        uniform = uniform_lib.Uniform(low=a, high=b)

        def _expected_cdf():
            cdf = (x - a_v) / (b_v - a_v)
            cdf[x >= b_v] = 1
            cdf[x < a_v] = 0
            return cdf

        cdf = uniform.cdf(x)
        self.assertAllClose(_expected_cdf(), self.evaluate(cdf))

        log_cdf = uniform.log_cdf(x)
        self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
Example #4
  def testUniformSample(self):
    with self.test_session():
      a = constant_op.constant([3.0, 4.0])
      b = constant_op.constant(13.0)
      a1_v = 3.0
      a2_v = 4.0
      b_v = 13.0
      n = constant_op.constant(100000)
      uniform = uniform_lib.Uniform(low=a, high=b)

      samples = uniform.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(sample_values.shape, (100000, 2))
      self.assertAllClose(
          sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-2)
      self.assertFalse(
          np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
      self.assertFalse(
          np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
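The tolerance in the mean checks follows from the uniform moments: a Uniform(a, b) draw has mean (a + b) / 2 and variance (b - a)**2 / 12, so with n = 100000 samples the standard error of the sample mean is about (b - a) / sqrt(12 * n), roughly 0.009 here, on the order of the atol=1e-2 used; the fixed seed keeps the check deterministic. A one-line sketch of that estimate:

import numpy as np

a, b, n = 3.0, 13.0, 100000
standard_error = (b - a) / np.sqrt(12 * n)  # ~0.0091, comparable to atol=1e-2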
Example #5
def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed):
    """Returns a uniformly random `Tensor` of "correlation-like" matrices.

  A "correlation-like" matrix is a symmetric square matrix with all entries
  between -1 and 1 (inclusive) and 1s on the main diagonal.  Of these,
  the ones that are positive semi-definite are exactly the correlation
  matrices.

  Args:
    num_rows: Python `int` dimension of the correlation-like matrices.
    batch_shape: `Tensor` or Python `tuple` of `int` shape of the
      batch to return.
    dtype: `dtype` of the `Tensor` to return.
    seed: Random seed.

  Returns:
    matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]`
      and dtype `dtype`.  Each entry is in [-1, 1], and each matrix
      along the bottom two dimensions is symmetric and has 1s on the
      main diagonal.
  """
    num_entries = num_rows * (num_rows + 1) // 2
    ones = array_ops.ones(shape=[num_entries], dtype=dtype)
    # Generating random values for the diagonal is wasteful because the
    # diagonal is overwritten with ones below, but `fill_triangular` fills
    # the diagonal, so they are needed anyway.
    # It might be more efficient to fill the whole matrix with random values
    # and then drop nearly half of them with `matrix_band_part`, instead of
    # using `fill_triangular`.
    unifs = uniform.Uniform(-ones, ones).sample(batch_shape, seed=seed)
    tril = util.fill_triangular(unifs)
    symmetric = tril + array_ops.matrix_transpose(tril)
    diagonal_ones = array_ops.ones(shape=util.pad(batch_shape,
                                                  axis=0,
                                                  back=True,
                                                  value=num_rows),
                                   dtype=dtype)
    return array_ops.matrix_set_diag(symmetric, diagonal_ones)
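The same construction in plain NumPy, as a sketch of what the function computes for a single matrix (sample a triangle of uniform entries, symmetrize, then force 1s onto the diagonal); the function name is illustrative:

import numpy as np

def uniform_correlation_like(num_rows, rng=np.random):
    # Uniform entries in [-1, 1] on the strict lower triangle.
    tril = np.tril(rng.uniform(-1.0, 1.0, size=(num_rows, num_rows)), k=-1)
    symmetric = tril + tril.T            # symmetric, zero diagonal so far
    np.fill_diagonal(symmetric, 1.0)     # 1s on the main diagonal
    return symmetric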
Example #6
    def testUniformPDF(self):
        a = constant_op.constant([-3.0] * 5 + [15.0])
        b = constant_op.constant([11.0] * 5 + [20.0])
        uniform = uniform_lib.Uniform(low=a, high=b)

        a_v = -3.0
        b_v = 11.0
        x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)

        def _expected_pdf():
            pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
            pdf[x > b_v] = 0.0
            pdf[x < a_v] = 0.0
            pdf[5] = 1.0 / (20.0 - 15.0)
            return pdf

        expected_pdf = _expected_pdf()

        pdf = uniform.prob(x)
        self.assertAllClose(expected_pdf, self.evaluate(pdf))

        log_pdf = uniform.log_prob(x)
        self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
Example #7
def assert_scalar_congruency(bijector,
                             lower_x,
                             upper_x,
                             n=int(10e3),
                             rtol=0.01,
                             sess=None):
  """Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.

  We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the
  `bijector` in order to check that:

  1. the forward is strictly monotonic.
  2. the forward/inverse methods are inverses of each other.
  3. the jacobian is the correct change of measure.

  This can only be used for a Bijector mapping open subsets of the real line
  to themselves.  This is due to the fact that this test compares the `prob`
  before/after transformation with the Lebesgue measure on the line.

  Args:
    bijector:  Instance of Bijector
    lower_x:  Python scalar.
    upper_x:  Python scalar.  Must have `lower_x < upper_x`, and both must be in
      the domain of the `bijector`.  The `bijector` should probably not produce
      huge variation in values in the interval `(lower_x, upper_x)`, or else
      the variance-based check of the Jacobian will require small `rtol` or
      huge `n`.
    n:  Number of samples to draw for the checks.
    rtol:  Positive number.  Used for the Jacobian check.
    sess:  `tf.Session`.  Defaults to the default session.

  Raises:
    AssertionError:  If tests fail.
  """

  # Checks and defaults.
  assert bijector.event_ndims.eval() == 0
  if sess is None:
    sess = ops.get_default_session()

  # Should be monotonic over this interval
  ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)
  if bijector.dtype is not None:
    ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)
  forward_on_10_pts = bijector.forward(ten_x_pts)

  # Set the lower/upper limits in the range of the bijector.
  lower_y, upper_y = sess.run(
      [bijector.forward(lower_x), bijector.forward(upper_x)])
  if upper_y < lower_y:  # If bijector.forward is a decreasing function.
    lower_y, upper_y = upper_y, lower_y

  # Uniform samples from the domain, range.
  uniform_x_samps = uniform_lib.Uniform(
      low=lower_x, high=upper_x).sample(n, seed=0)
  uniform_y_samps = uniform_lib.Uniform(
      low=lower_y, high=upper_y).sample(n, seed=1)

  # These compositions should be the identity.
  inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))
  forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))

  # For a < b, and transformation y = y(x),
  # (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy
  # "change_measure_dy_dx" below is a Monte Carlo approximation to the right
  # hand side, which should then be close to the left, which is (b - a).
  dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(uniform_y_samps))
  # E[|dx/dy|] under Uniform[lower_y, upper_y]
  # = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure
  expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)
  # dy = dP(u) * (upper_y - lower_y)
  change_measure_dy_dx = (
      (upper_y - lower_y) * expectation_of_dy_dx_under_uniform)

  # We'll also check that dy_dx = 1 / dx_dy.
  dx_dy = math_ops.exp(
      bijector.forward_log_det_jacobian(bijector.inverse(uniform_y_samps)))

  [
      forward_on_10_pts_v,
      dy_dx_v,
      dx_dy_v,
      change_measure_dy_dx_v,
      uniform_x_samps_v,
      uniform_y_samps_v,
      inverse_forward_x_v,
      forward_inverse_y_v,
  ] = sess.run([
      forward_on_10_pts,
      dy_dx,
      dx_dy,
      change_measure_dy_dx,
      uniform_x_samps,
      uniform_y_samps,
      inverse_forward_x,
      forward_inverse_y,
  ])

  assert_strictly_monotonic(forward_on_10_pts_v)
  # Composition of forward/inverse should be the identity.
  np.testing.assert_allclose(
      inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)
  np.testing.assert_allclose(
      forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)
  # Change of measure should be correct.
  np.testing.assert_allclose(
      upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol)
  # Inverse Jacobian should be equivalent to the reciprocal of the forward
  # Jacobian.
  np.testing.assert_allclose(
      dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3)
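The change-of-measure check can be exercised standalone with a concrete bijector such as y = exp(x): then |dx/dy| = 1 / y and (b - a) = \int_{e^a}^{e^b} (1 / y) dy, which the Monte Carlo estimate reproduces. A small NumPy sketch of that identity (names are illustrative):

import numpy as np

lower_x, upper_x = 1.0, 2.0
lower_y, upper_y = np.exp(lower_x), np.exp(upper_x)

rng = np.random.RandomState(0)
y = rng.uniform(lower_y, upper_y, size=100000)
dx_dy = 1.0 / y  # |dx/dy| for x = log(y)
change_measure = (upper_y - lower_y) * dx_dy.mean()
np.testing.assert_allclose(change_measure, upper_x - lower_x, rtol=0.01)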