Example #1
  def testUniformStd(self):
    with self.test_session():
      a = 10.0
      b = 100.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      s_uniform = stats.uniform(loc=a, scale=b - a)
      self.assertAllClose(uniform.stddev().eval(), s_uniform.std())
Example #2
  def testUniformVariance(self):
    with self.test_session():
      a = 10.0
      b = 100.0
      uniform = uniform_lib.Uniform(low=a, high=b)
      s_uniform = stats.uniform(loc=a, scale=b - a)
      self.assertAllClose(uniform.variance().eval(), s_uniform.var())
Example #3
  def _testUniformSampleMultiDimensional(self):
    # DISABLED: Please enable this test once b/issues/30149644 is resolved.
    with self.test_session():
      batch_size = 2
      a_v = [3.0, 22.0]
      b_v = [13.0, 35.0]
      a = constant_op.constant([a_v] * batch_size)
      b = constant_op.constant([b_v] * batch_size)

      uniform = uniform_lib.Uniform(a=a, b=b)

      n_v = 100000
      n = constant_op.constant(n_v)
      samples = uniform.sample(n)
      self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))

      sample_values = samples.eval()

      self.assertFalse(
          np.any(sample_values[:, 0, 0] < a_v[0]) or
          np.any(sample_values[:, 0, 0] >= b_v[0]))
      self.assertFalse(
          np.any(sample_values[:, 0, 1] < a_v[1]) or
          np.any(sample_values[:, 0, 1] >= b_v[1]))

      self.assertAllClose(
          sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
Example #4
  def testUniformSamplePdf(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 100.0]
      uniform = uniform_lib.Uniform(a, b)
      self.assertTrue(
          math_ops.reduce_all(uniform.pdf(uniform.sample(10)) > 0).eval())
Example #5
  def testUniformPDF(self):
    with self.test_session():
      a = constant_op.constant([-3.0] * 5 + [15.0])
      b = constant_op.constant([11.0] * 5 + [20.0])
      uniform = uniform_lib.Uniform(a=a, b=b)

      a_v = -3.0
      b_v = 11.0
      x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)

      def _expected_pdf():
        pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
        pdf[x > b_v] = 0.0
        pdf[x < a_v] = 0.0
        pdf[5] = 1.0 / (20.0 - 15.0)
        return pdf

      expected_pdf = _expected_pdf()

      pdf = uniform.pdf(x)
      self.assertAllClose(expected_pdf, pdf.eval())

      log_pdf = uniform.log_pdf(x)
      self.assertAllClose(np.log(expected_pdf), log_pdf.eval())
Example #6
  def testUniformRange(self):
    with self.test_session():
      a = 3.0
      b = 10.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      self.assertAllClose(a, uniform.a.eval())
      self.assertAllClose(b, uniform.b.eval())
      self.assertAllClose(b - a, uniform.range().eval())
Example #7
  def testUniformRange(self):
    with self.test_session():
      a = 3.0
      b = 10.0
      uniform = uniform_lib.Uniform(low=a, high=b)
      self.assertAllClose(a, uniform.low.eval())
      self.assertAllClose(b, uniform.high.eval())
      self.assertAllClose(b - a, uniform.range().eval())
Example #8
  def testUniformEntropy(self):
    with self.test_session():
      a_v = np.array([1.0, 1.0, 1.0])
      b_v = np.array([[1.5, 2.0, 3.0]])
      uniform = uniform_lib.Uniform(a=a_v, b=b_v)

      expected_entropy = np.log(b_v - a_v)
      self.assertAllClose(expected_entropy, uniform.entropy().eval())
Example #9
  def testUniformBroadcasting(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 20.0]
      uniform = uniform_lib.Uniform(a, b)

      pdf = uniform.pdf([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
      expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
      self.assertAllClose(expected_pdf, pdf.eval())
Example #10
  def testUniformAssertMaxGtMin(self):
    with self.test_session():
      a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
      b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
      uniform = uniform_lib.Uniform(a=a_v, b=b_v, validate_args=True)

      with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
                                               "x < y"):
        uniform.a.eval()
Example #11
  def testUniformShape(self):
    with self.test_session():
      a = constant_op.constant([-3.0] * 5)
      b = constant_op.constant(11.0)
      uniform = uniform_lib.Uniform(a=a, b=b)

      self.assertEqual(uniform.batch_shape().eval(), (5,))
      self.assertEqual(uniform.get_batch_shape(), tensor_shape.TensorShape([5]))
      self.assertAllEqual(uniform.event_shape().eval(), [])
      self.assertEqual(uniform.get_event_shape(), tensor_shape.TensorShape([]))
Example #12
  def testUniformPDFWithScalarEndpoint(self):
    with self.test_session():
      a = constant_op.constant([0.0, 5.0])
      b = constant_op.constant(10.0)
      uniform = uniform_lib.Uniform(a=a, b=b)

      x = np.array([0.0, 8.0], dtype=np.float32)
      expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])

      pdf = uniform.pdf(x)
      self.assertAllClose(expected_pdf, pdf.eval())
Example #13
  def testUniformNans(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 100.0]
      uniform = uniform_lib.Uniform(a=a, b=b)

      no_nans = constant_op.constant(1.0)
      nans = constant_op.constant(0.0) / constant_op.constant(0.0)
      self.assertTrue(math_ops.is_nan(nans).eval())
      with_nans = array_ops.stack([no_nans, nans])

      pdf = uniform.pdf(with_nans)

      is_nan = math_ops.is_nan(pdf).eval()
      self.assertFalse(is_nan[0])
      self.assertTrue(is_nan[1])
Example #14
  def testUniformSampleWithShape(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 20.0]
      uniform = uniform_lib.Uniform(a, b)

      pdf = uniform.pdf(uniform.sample((2, 3)))
      # pylint: disable=bad-continuation
      expected_pdf = [
          [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
          [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
      ]
      # pylint: enable=bad-continuation
      self.assertAllClose(expected_pdf, pdf.eval())

      pdf = uniform.pdf(uniform.sample())
      expected_pdf = [1.0, 0.1]
      self.assertAllClose(expected_pdf, pdf.eval())
Example #15
  def testUniformSample(self):
    with self.test_session():
      a = constant_op.constant([3.0, 4.0])
      b = constant_op.constant(13.0)
      a1_v = 3.0
      a2_v = 4.0
      b_v = 13.0
      n = constant_op.constant(100000)
      uniform = uniform_lib.Uniform(a=a, b=b)

      samples = uniform.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(sample_values.shape, (100000, 2))
      self.assertAllClose(
          sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-2)
      self.assertFalse(
          np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
      self.assertFalse(
          np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
Example #16
  def testUniformCDF(self):
    with self.test_session():
      batch_size = 6
      a = constant_op.constant([1.0] * batch_size)
      b = constant_op.constant([11.0] * batch_size)
      a_v = 1.0
      b_v = 11.0
      x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

      uniform = uniform_lib.Uniform(a=a, b=b)

      def _expected_cdf():
        cdf = (x - a_v) / (b_v - a_v)
        cdf[x >= b_v] = 1
        cdf[x < a_v] = 0
        return cdf

      cdf = uniform.cdf(x)
      self.assertAllClose(_expected_cdf(), cdf.eval())

      log_cdf = uniform.log_cdf(x)
      self.assertAllClose(np.log(_expected_cdf()), log_cdf.eval())
Example #17
def assert_scalar_congruency(bijector,
                             lower_x,
                             upper_x,
                             n=int(10e3),
                             rtol=0.01,
                             sess=None):
    """Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.

  We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the
  `bijector` in order to check that:

  1. the forward is strictly monotonic.
  2. the forward/inverse methods are inverses of each other.
  3. the jacobian is the correct change of measure.

  This can only be used for a Bijector mapping open subsets of the real line
  to themselves.  This is due to the fact that this test compares the `prob`
  before/after transformation with the Lebesgue measure on the line.

  Args:
    bijector:  Instance of Bijector
    lower_x:  Python scalar.
    upper_x:  Python scalar.  Must have `lower_x < upper_x`, and both must be in
      the domain of the `bijector`.  The `bijector` should probably not produce
      huge variation in values in the interval `(lower_x, upper_x)`, or else
      the variance based check of the Jacobian will require small `rtol` or
      huge `n`.
    n:  Number of samples to draw for the checks.
    rtol:  Positive number.  Used for the Jacobian check.
    sess:  `tf.Session`.  Defaults to the default session.

  Raises:
    AssertionError:  If tests fail.
  """

    # Checks and defaults.
    assert bijector.event_ndims.eval() == 0
    if sess is None:
        sess = ops.get_default_session()

    # Should be monotonic over this interval
    ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)
    if bijector.dtype is not None:
        ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype)
    forward_on_10_pts = bijector.forward(ten_x_pts)

    # Set the lower/upper limits in the range of the bijector.
    lower_y, upper_y = sess.run(
        [bijector.forward(lower_x),
         bijector.forward(upper_x)])
    if upper_y < lower_y:  # If bijector.forward is a decreasing function.
        lower_y, upper_y = upper_y, lower_y

    # Uniform samples from the domain, range.
    uniform_x_samps = uniform_lib.Uniform(low=lower_x,
                                          high=upper_x).sample(n, seed=0)
    uniform_y_samps = uniform_lib.Uniform(low=lower_y,
                                          high=upper_y).sample(n, seed=1)

    # These compositions should be the identity.
    inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))
    forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))

    # For a < b, and transformation y = y(x),
    # (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy
    # "change_measure_dy_dx" below is a Monte Carlo approximation to the right
    # hand side, which should then be close to the left, which is (b - a).
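    # As an illustrative sanity check of the identity (not computed here): for
    # the linear bijector y = 2x on [a, b] = [0, 1], |dx/dy| = 1/2 on
    # [y(a), y(b)] = [0, 2], so (upper_y - lower_y) * E[|dx/dy|] = 2 * 0.5 = 1,
    # which equals b - a as required.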
    dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian(uniform_y_samps))
    # E[|dx/dy|] under Uniform[lower_y, upper_y]
    # = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure
    expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx)
    # dy = dP(u) * (upper_y - lower_y)
    change_measure_dy_dx = ((upper_y - lower_y) *
                            expectation_of_dy_dx_under_uniform)

    # We'll also check that dy_dx = 1 / dx_dy.
    dx_dy = math_ops.exp(
        bijector.forward_log_det_jacobian(bijector.inverse(uniform_y_samps)))

    [
        forward_on_10_pts_v,
        dy_dx_v,
        dx_dy_v,
        change_measure_dy_dx_v,
        uniform_x_samps_v,
        uniform_y_samps_v,
        inverse_forward_x_v,
        forward_inverse_y_v,
    ] = sess.run([
        forward_on_10_pts,
        dy_dx,
        dx_dy,
        change_measure_dy_dx,
        uniform_x_samps,
        uniform_y_samps,
        inverse_forward_x,
        forward_inverse_y,
    ])

    assert_strictly_monotonic(forward_on_10_pts_v)
    # Composition of forward/inverse should be the identity.
    np.testing.assert_allclose(inverse_forward_x_v,
                               uniform_x_samps_v,
                               atol=1e-5,
                               rtol=1e-3)
    np.testing.assert_allclose(forward_inverse_y_v,
                               uniform_y_samps_v,
                               atol=1e-5,
                               rtol=1e-3)
    # Change of measure should be correct.
    np.testing.assert_allclose(upper_x - lower_x,
                               change_measure_dy_dx_v,
                               atol=0,
                               rtol=rtol)
    # Inverse Jacobian should be equivalent to the reciprocal of the forward
    # Jacobian.
    np.testing.assert_allclose(dy_dx_v,
                               np.divide(1., dx_dy_v),
                               atol=1e-5,
                               rtol=1e-3)
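
For reference, a minimal sketch of how a test might invoke assert_scalar_congruency. The Exp bijector, its import path, and the session handling are assumptions about the contrib-era TF 1.x API rather than something taken from the examples above; only the call signature comes from Example #17.

# Usage sketch (assumed TF 1.x contrib API; Exp and its import path are
# illustrative, not part of the original examples).
import tensorflow as tf

def check_exp_congruency():
  # y = exp(x) is strictly monotonic and maps (-2., 2.) onto (exp(-2), exp(2)).
  bijector = tf.contrib.distributions.bijectors.Exp()
  with tf.Session() as sess:  # graph mode; this session becomes the default
    assert_scalar_congruency(bijector, lower_x=-2., upper_x=2., sess=sess)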