Example #1
  def test_dkwm_argument_validity_checking(self, dtype):
    rng = np.random.RandomState(seed=0)
    samples = rng.uniform(
        low=[0., 1.], high=[1., 2.], size=(2500, 1, 2)).astype(dtype=dtype)

    # Test that the test library complains if the given samples fall
    # outside the purported bounds.
    with self.assertRaisesOpError('maximum value exceeds expectations'):
      self.evaluate(st.true_mean_confidence_interval_by_dkwm(
          samples, [[0., 1.]], [[0.5, 1.5]], error_rate=0.5))
    with self.assertRaisesOpError('minimum value falls below expectations'):
      self.evaluate(st.true_mean_confidence_interval_by_dkwm(
          samples, [[0.5, 1.5]], [[1., 2.]], error_rate=0.5))

    # But doesn't complain if they don't.
    op = st.true_mean_confidence_interval_by_dkwm(
        samples, [[0., 1.]], [[1., 2.]], error_rate=0.5)
    _ = self.evaluate(op)
Example #2
  def test_dkwm_argument_validity_checking(self):
    rng = np.random.RandomState(seed=0)
    samples = rng.uniform(
        low=[0., 1.], high=[1., 2.], size=(2500, 1, 2)).astype(np.float32)

    # Test that the test library complains if the given samples fall
    # outside the purported bounds.
    with self.assertRaisesOpError("maximum value exceeds expectations"):
      self.evaluate(st.true_mean_confidence_interval_by_dkwm(
          samples, [[0., 1.]], [[0.5, 1.5]], error_rate=0.5))
    with self.assertRaisesOpError("minimum value falls below expectations"):
      self.evaluate(st.true_mean_confidence_interval_by_dkwm(
          samples, [[0.5, 1.5]], [[1., 2.]], error_rate=0.5))

    # But doesn't complain if they don't.
    op = st.true_mean_confidence_interval_by_dkwm(
        samples, [[0., 1.]], [[1., 2.]], error_rate=0.5)
    _ = self.evaluate(op)
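
The two examples above only exercise the argument validity checks: true_mean_confidence_interval_by_dkwm refuses to compute an interval when the samples fall outside the [low, high] bounds the caller claims for them. Below is a minimal NumPy sketch of that behavior, an illustration of what the tests expect rather than the library's actual implementation; the half-width formula assumes the standard two-sided DKW(M) bound.

import numpy as np

def dkwm_mean_ci_sketch(samples, low, high, error_rate):
  """Illustrative only: validity check plus a DKW(M)-style interval for the mean."""
  samples = np.asarray(samples, dtype=np.float64)
  # These are the checks the tests above expect to trip when the claimed
  # bounds are wrong.
  if samples.max() > high:
    raise ValueError('maximum value exceeds expectations')
  if samples.min() < low:
    raise ValueError('minimum value falls below expectations')
  n = samples.shape[0]
  # Two-sided DKW inequality with Massart's constant:
  #   P(sup_x |F_n(x) - F(x)| > eps) <= 2 * exp(-2 * n * eps**2),
  # so eps = sqrt(log(2 / error_rate) / (2 * n)) at the requested error rate.
  eps = np.sqrt(np.log(2. / error_rate) / (2. * n))
  # A sup-norm bound of eps on the CDF over [low, high] bounds the true mean
  # within eps * (high - low) of the sample mean.
  half_width = eps * (high - low)
  mean = samples.mean()
  return max(low, mean - half_width), min(high, mean + half_width)

samples = np.random.RandomState(0).uniform(0., 1., size=5000)
print(dkwm_mean_ci_sketch(samples, 0., 1., error_rate=0.5))  # bounds hold
try:
  dkwm_mean_ci_sketch(samples, 0.5, 1.5, error_rate=0.5)     # bad claimed lower bound
except ValueError as e:
  print(e)  # 'minimum value falls below expectations'
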
Example #3
  def test_true_mean_confidence_interval_by_dkwm_one_sample(self, dtype):
    rng = np.random.RandomState(seed=0)

    num_samples = 5000
    # 5000 samples is chosen to be enough to find discrepancies of
    # size 0.1 or more with assurance 1e-6, as confirmed here:
    d = st.min_discrepancy_of_true_means_detectable_by_dkwm(
        num_samples, 0., 1., false_fail_rate=1e-6, false_pass_rate=1e-6)
    d = self.evaluate(d)
    self.assertLess(d, 0.1)

    # Test that the confidence interval computed for the mean includes
    # 0.5 and excludes 0.4 and 0.6.
    samples = rng.uniform(size=num_samples).astype(dtype=dtype)
    (low, high) = st.true_mean_confidence_interval_by_dkwm(
        samples, 0., 1., error_rate=1e-6)
    low, high = self.evaluate([low, high])
    self.assertGreater(low, 0.4)
    self.assertLess(low, 0.5)
    self.assertGreater(high, 0.5)
    self.assertLess(high, 0.6)
Example #4
  def test_true_mean_confidence_interval_by_dkwm_one_sample(self):
    rng = np.random.RandomState(seed=0)

    num_samples = 5000
    # 5000 samples is chosen to be enough to find discrepancies of
    # size 0.1 or more with assurance 1e-6, as confirmed here:
    d = st.min_discrepancy_of_true_means_detectable_by_dkwm(
        num_samples, 0., 1., false_fail_rate=1e-6, false_pass_rate=1e-6)
    d = self.evaluate(d)
    self.assertLess(d, 0.1)

    # Test that the confidence interval computed for the mean includes
    # 0.5 and excludes 0.4 and 0.6.
    samples = rng.uniform(size=num_samples).astype(np.float32)
    (low, high) = st.true_mean_confidence_interval_by_dkwm(
        samples, 0., 1., error_rate=1e-6)
    low, high = self.evaluate([low, high])
    self.assertGreater(low, 0.4)
    self.assertLess(low, 0.5)
    self.assertGreater(high, 0.5)
    self.assertLess(high, 0.6)
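
A rough back-of-the-envelope check of the numbers in the last two examples, assuming the interval half-width for samples supported on [0, 1] is the standard two-sided DKW(M) value sqrt(log(2 / error_rate) / (2 * n)): with n = 5000 and error_rate = 1e-6 the half-width is about 0.038, so the interval around the sample mean (close to 0.5 for Uniform(0, 1) draws) contains 0.5 and excludes 0.4 and 0.6, and roughly two such half-widths (one per error budget in the detectability bound) still come in under the asserted 0.1.

import numpy as np

n = 5000
error_rate = 1e-6
# Solve 2 * exp(-2 * n * eps**2) = error_rate for eps.
eps = np.sqrt(np.log(2. / error_rate) / (2. * n))
print(eps)  # ~0.038

# The sample mean of 5000 Uniform(0, 1) draws is ~0.5 (standard error ~0.004),
# so the resulting interval is roughly [0.46, 0.54].
samples = np.random.RandomState(0).uniform(size=n)
print(samples.mean() - eps, samples.mean() + eps)
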