def testTriangularSample(self):
  low = self._dtype([-3.] * 4)
  high = np.arange(7., 11., dtype=self._dtype)
  peak = np.array([0.] * 4, dtype=self._dtype)
  tri = self._create_triangular_dist(low, high, peak)
  num_samples = int(3e6)
  samples = tri.sample(num_samples, seed=tfp_test_util.test_seed())
  detectable_discrepancies = self.evaluate(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low, high,
          false_fail_rate=self._dtype(1e-6),
          false_pass_rate=self._dtype(1e-6)))
  below_threshold = detectable_discrepancies <= 0.05
  self.assertTrue(np.all(below_threshold))
  self.evaluate(
      st.assert_true_mean_equal_by_dkwm(
          samples, low=low, high=high, expected=tri.mean(),
          false_fail_rate=self._dtype(1e-6)))
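# For reference, the mean of Triangular(low, peak, high) is
# (low + peak + high) / 3, so the first batch member above, with low=-3.,
# peak=0., and high=7., has mean (-3. + 0. + 7.) / 3 = 4. / 3; this is the
# value tri.mean() supplies to the DKWM mean assertion.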
def testRejection2D(self):
  num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
  det_bounds = np.array(
      [0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
  exact_volumes = two_by_two_volume(det_bounds)
  (rej_weights,
   rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
       det_bounds, 2, [num_samples, 9], dtype=np.float32, seed=43)
  # shape of rej_weights: [num_samples, 9, 2, 2]
  chk1 = st.assert_true_mean_equal_by_dkwm(
      rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
      false_fail_rate=1e-6)
  chk2 = tf1.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=rej_proposal_volume,
          # Correct the false fail rate due to different broadcasting
          false_fail_rate=1.1e-7, false_pass_rate=1e-6),
      0.036)
  with tf.control_dependencies([chk1, chk2]):
    rej_weights = tf.identity(rej_weights)
  self.evaluate(rej_weights)
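# For reference: a 2 x 2 correlation matrix [[1, r], [r, 1]] has determinant
# 1 - r**2, so the matrices with determinant above a bound d are exactly
# those with |r| < sqrt(1 - d), a set of one-dimensional volume
# 2 * sqrt(1 - d). A minimal sketch of what two_by_two_volume is presumed to
# compute (the helper name below is hypothetical):
def _two_by_two_volume_sketch(det_bounds):
  return 2. * np.sqrt(1. - det_bounds)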
def testMeanHigherDimension(self, dtype):
  testee_lkj = tfd.LKJ(
      dimension=6, concentration=dtype([1., 3., 5.]), validate_args=True)
  num_samples = 20000
  results = testee_lkj.sample(
      sample_shape=[num_samples], seed=test_util.test_seed())
  mean = testee_lkj.mean()
  self.assertEqual(mean.shape, [3, 6, 6])
  # tfd.LKJ has some small numerical issues, so we allow for some amount of
  # numerical tolerance when testing means.
  numerical_tolerance = 1e-5
  check1 = st.assert_true_mean_in_interval_by_dkwm(
      samples=results, low=-1., high=1.,
      expected_low=mean - numerical_tolerance,
      expected_high=mean + numerical_tolerance,
      false_fail_rate=1e-6)
  check2 = assert_util.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=-1., high=1.,
          # Smaller false fail rate because of different batch sizes between
          # these two checks.
          false_fail_rate=1e-7, false_pass_rate=1e-6),
      # 4% relative error
      0.08)
  self.evaluate([check1, check2])
def testMean(self, dtype):
  testee_lkj = tfd.LKJ(
      dimension=3, concentration=dtype([1., 3., 5.]), validate_args=True)
  num_samples = 20000
  results = testee_lkj.sample(sample_shape=[num_samples])
  mean = testee_lkj.mean()
  self.assertEqual(mean.shape, [3, 3, 3])
  check1 = st.assert_true_mean_equal_by_dkwm(
      samples=results, low=-1., high=1., expected=mean, false_fail_rate=1e-6)
  check2 = assert_util.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=-1., high=1.,
          # Smaller false fail rate because of different batch sizes between
          # these two checks.
          false_fail_rate=1e-7, false_pass_rate=1e-6),
      # 4% relative error
      0.08)
  self.evaluate([check1, check2])
def VerifyMean(self, dim):
  num_samples = int(7e4)
  uniform = tfp.distributions.SphericalUniform(
      batch_shape=[2, 1], dimension=dim, dtype=self.dtype,
      validate_args=True, allow_nan_stats=False)
  samples = uniform.sample(num_samples, seed=test_util.test_seed())
  sample_mean = tf.reduce_mean(samples, axis=0)
  true_mean, sample_mean = self.evaluate([uniform.mean(), sample_mean])
  check1 = st.assert_true_mean_equal_by_dkwm(
      samples=samples, low=-(1. + 1e-7), high=1. + 1e-7, expected=true_mean,
      false_fail_rate=1e-6)
  check2 = assert_util.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=-1., high=1.,
          # Smaller false fail rate because of different batch sizes between
          # these two checks.
          false_fail_rate=1e-7, false_pass_rate=1e-6),
      # 4% relative error
      0.08)
  self.evaluate([check1, check2])
def test_dkwm_design_mean_one_sample_soundness(self):
  thresholds = [1e-5, 1e-2, 1.1e-1, 0.9, 1., 1.02, 2., 10., 1e2, 1e5, 1e10]
  rates = [1e-6, 1e-3, 1e-2, 1.1e-1, 0.2, 0.5, 0.7, 1.]
  false_fail_rates, false_pass_rates = np.meshgrid(rates, rates)
  false_fail_rates = false_fail_rates.flatten().astype(np.float32)
  false_pass_rates = false_pass_rates.flatten().astype(np.float32)
  detectable_discrepancies = []
  for false_pass_rate, false_fail_rate in zip(
      false_pass_rates, false_fail_rates):
    sufficient_n = st.min_num_samples_for_dkwm_mean_test(
        thresholds, low=0., high=1.,
        false_fail_rate=false_fail_rate,
        false_pass_rate=false_pass_rate)
    detectable_discrepancies.append(
        st.min_discrepancy_of_true_means_detectable_by_dkwm(
            sufficient_n, low=0., high=1.,
            false_fail_rate=false_fail_rate,
            false_pass_rate=false_pass_rate))
  detectable_discrepancies_ = self.evaluate(detectable_discrepancies)
  for discrepancies, false_pass_rate, false_fail_rate in zip(
      detectable_discrepancies_, false_pass_rates, false_fail_rates):
    below_threshold = discrepancies <= thresholds
    self.assertAllEqual(
        np.ones_like(below_threshold, np.bool_), below_threshold,
        msg='false_pass_rate({}), false_fail_rate({})'.format(
            false_pass_rate, false_fail_rate))
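# A minimal sketch (plain numpy; the helper name is hypothetical, not TFP's
# API) of the bound these design functions invert: the
# Dvoretzky-Kiefer-Wolfowitz inequality with Massart's tight constant states
# that for n i.i.d. samples with empirical CDF F_n,
#   P(sup_x |F_n(x) - F(x)| > eps) <= 2 * exp(-2 * n * eps**2).
# Solving 2 * exp(-2 * n * eps**2) = error_rate for eps gives the half-width
# of the CDF confidence band, from which detectable mean discrepancies for
# [low, high]-bounded variables are derived.
def _dkwm_cdf_envelope_sketch(n, error_rate):
  return np.sqrt(-np.log(error_rate / 2.) / (2. * n))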
def _testSampleLogProbExact(
    self, concentrations, det_bounds, dim, means,
    num_samples=int(1e5), dtype=np.float32, target_discrepancy=0.1, seed=42):
  # For test methodology see the comment in
  # _testSampleConsistentLogProbInterval, except that this test
  # checks those parameter settings where the true volume is known
  # analytically.
  concentration = np.array(concentrations, dtype=dtype)
  det_bounds = np.array(det_bounds, dtype=dtype)
  means = np.array(means, dtype=dtype)
  # Add a tolerance to guard against some of the importance_weights exceeding
  # the theoretical maximum (importance_maxima) due to numerical inaccuracies
  # while lower bounding the determinant. See corresponding comment in
  # _testSampleConsistentLogProbInterval.
  high_tolerance = 1e-6
  testee_lkj = tfd.LKJ(
      dimension=dim, concentration=concentration, validate_args=True)
  x = testee_lkj.sample(num_samples, seed=seed)
  importance_weights = (
      tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds))
  importance_maxima = (1. / det_bounds) ** (concentration - 1) * tf.exp(
      testee_lkj._log_normalization())
  chk1 = st.assert_true_mean_equal_by_dkwm(
      importance_weights, low=0., high=importance_maxima + high_tolerance,
      expected=means, false_fail_rate=1e-6)
  chk2 = tf.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=importance_maxima + high_tolerance,
          false_fail_rate=1e-6, false_pass_rate=1e-6),
      dtype(target_discrepancy))
  self.evaluate([chk1, chk2])
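# The identity the _testSampleLogProbExact variants rely on: for a proposal
# density q that is positive on a region M, E_q[1{x in M} / q(x)] equals the
# volume of M. A self-contained numeric illustration (plain numpy, helper
# name hypothetical), estimating the area of the unit disk from uniform
# proposals on the square [-1, 1]^2:
def _volume_by_importance_sketch(num_samples=100000, seed=0):
  rng = np.random.RandomState(seed)
  x = rng.uniform(-1., 1., size=(num_samples, 2))
  q = 1. / 4.  # Uniform density on the square, which has area 4.
  mask = (np.sum(x**2, axis=-1) < 1.).astype(np.float64)
  return np.mean(mask / q)  # Converges to pi, the disk's area.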
def testRejection4D(self):
  num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
  det_bounds = np.array([0.0], dtype=np.float32)
  exact_volumes = [four_by_four_volume()]
  (rej_weights,
   rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
       det_bounds, 4, [num_samples, 1], dtype=np.float32, seed=45)
  # shape of rej_weights: [num_samples, 1, 4, 4]
  chk1 = st.assert_true_mean_equal_by_dkwm(
      rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
      false_fail_rate=1e-6)
  chk2 = tf1.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=rej_proposal_volume,
          false_fail_rate=1e-6, false_pass_rate=1e-6),
      # Going for about a 10% relative error
      1.1)
  with tf.control_dependencies([chk1, chk2]):
    rej_weights = tf.identity(rej_weights)
  self.evaluate(rej_weights)
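# A hedged reading of how the rejection estimate works: candidates are
# proposed uniformly from the cube of off-diagonal entries, and each weight
# in rej_weights is either 0 (rejected) or the proposal volume (accepted),
# so the sample mean of rej_weights estimates the volume of the qualifying
# set of correlation matrices. For 4 x 4 matrices there are 4 * 3 / 2 = 6
# free entries, so the proposal cube [-1, 1]^6 has volume 2**6 = 64.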
def _testSampleLogProbExact(
    self, concentrations, det_bounds, dim, means,
    num_samples=int(1e5), dtype=np.float32, target_discrepancy=0.1,
    input_output_cholesky=False, seed=42):
  # For test methodology see the comment in
  # _testSampleConsistentLogProbInterval, except that this test
  # checks those parameter settings where the true volume is known
  # analytically.
  concentration = np.array(concentrations, dtype=dtype)
  det_bounds = np.array(det_bounds, dtype=dtype)
  means = np.array(means, dtype=dtype)
  # Add a tolerance to guard against some of the importance_weights exceeding
  # the theoretical maximum (importance_maxima) due to numerical inaccuracies
  # while lower bounding the determinant. See corresponding comment in
  # _testSampleConsistentLogProbInterval.
  high_tolerance = 1e-6
  testee_lkj = tfd.LKJ(
      dimension=dim, concentration=concentration,
      input_output_cholesky=input_output_cholesky, validate_args=True)
  x = testee_lkj.sample(num_samples, seed=seed)
  importance_weights = (
      tf.exp(-testee_lkj.log_prob(x)) *
      _det_ok_mask(x, det_bounds, input_output_cholesky))
  importance_maxima = (1. / det_bounds)**(concentration - 1) * tf.exp(
      testee_lkj._log_normalization())
  chk1 = st.assert_true_mean_equal_by_dkwm(
      importance_weights, low=0., high=importance_maxima + high_tolerance,
      expected=means, false_fail_rate=1e-6)
  chk2 = assert_util.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=importance_maxima + high_tolerance,
          false_fail_rate=1e-6, false_pass_rate=1e-6),
      dtype(target_discrepancy))
  self.evaluate([chk1, chk2])
def testMean(self, dtype):
  testee_lkj = tfd.LKJ(dimension=3, concentration=dtype([1., 3., 5.]))
  num_samples = 20000
  results = testee_lkj.sample(sample_shape=[num_samples])
  mean = testee_lkj.mean()
  self.assertEqual(mean.shape, [3, 3, 3])
  check1 = st.assert_true_mean_equal_by_dkwm(
      samples=results, low=-1., high=1., expected=mean, false_fail_rate=1e-6)
  check2 = tf.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=-1., high=1.,
          # Smaller false fail rate because of different batch sizes between
          # these two checks.
          false_fail_rate=1e-7, false_pass_rate=1e-6),
      # 4% relative error
      0.08)
  self.evaluate([check1, check2])
def testTriangularSample(self):
  low = self._dtype([-3.] * 4)
  high = np.arange(7., 11., dtype=self._dtype)
  peak = np.array([0.] * 4, dtype=self._dtype)
  tri = self._create_triangular_dist(low, high, peak)
  num_samples = int(3e6)
  samples = tri.sample(num_samples, seed=123)
  detectable_discrepancies = self.evaluate(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low, high,
          false_fail_rate=self._dtype(1e-6),
          false_pass_rate=self._dtype(1e-6)))
  below_threshold = detectable_discrepancies <= 0.05
  self.assertTrue(np.all(below_threshold))
  self.evaluate(
      st.assert_true_mean_equal_by_dkwm(
          samples, low=low, high=high, expected=tri.mean(),
          false_fail_rate=self._dtype(1e-6)))
def testRejection4D(self):
  num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
  det_bounds = np.array([0.0], dtype=np.float32)
  exact_volumes = [four_by_four_volume()]
  (rej_weights,
   rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
       det_bounds, 4, [num_samples, 1], dtype=np.float32, seed=45)
  # shape of rej_weights: [num_samples, 1, 4, 4]
  chk1 = st.assert_true_mean_equal_by_dkwm(
      rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
      false_fail_rate=1e-6)
  chk2 = tf.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=rej_proposal_volume,
          false_fail_rate=1e-6, false_pass_rate=1e-6),
      # Going for about a 10% relative error
      1.1)
  with tf.control_dependencies([chk1, chk2]):
    rej_weights = tf.identity(rej_weights)
  self.evaluate(rej_weights)
def testRejection2D(self):
  num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
  det_bounds = np.array(
      [0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
  exact_volumes = two_by_two_volume(det_bounds)
  (rej_weights,
   rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
       det_bounds, 2, [num_samples, 9], dtype=np.float32, seed=43)
  # shape of rej_weights: [num_samples, 9, 2, 2]
  chk1 = st.assert_true_mean_equal_by_dkwm(
      rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
      false_fail_rate=1e-6)
  chk2 = tf.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=rej_proposal_volume,
          # Correct the false fail rate due to different broadcasting
          false_fail_rate=1.1e-7, false_pass_rate=1e-6),
      0.036)
  with tf.control_dependencies([chk1, chk2]):
    rej_weights = tf.identity(rej_weights)
  self.evaluate(rej_weights)
def test_true_mean_confidence_interval_by_dkwm_one_sample(self):
  rng = np.random.RandomState(seed=0)
  num_samples = 5000
  # 5000 samples is chosen to be enough to find discrepancies of
  # size 0.1 or more with assurance 1e-6, as confirmed here:
  d = st.min_discrepancy_of_true_means_detectable_by_dkwm(
      num_samples, 0., 1., false_fail_rate=1e-6, false_pass_rate=1e-6)
  d = self.evaluate(d)
  self.assertLess(d, 0.1)
  # Test that the confidence interval computed for the mean includes
  # 0.5 and excludes 0.4 and 0.6.
  samples = rng.uniform(size=num_samples).astype(np.float32)
  (low, high) = st.true_mean_confidence_interval_by_dkwm(
      samples, 0., 1., error_rate=1e-6)
  low, high = self.evaluate([low, high])
  self.assertGreater(low, 0.4)
  self.assertLess(low, 0.5)
  self.assertGreater(high, 0.5)
  self.assertLess(high, 0.6)
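# Why a DKWM CDF band yields a mean confidence interval: for X supported on
# [low, high] with CDF F, E[X] = low + the integral of (1 - F(x)) over
# [low, high], so displacing F uniformly by eps moves the mean by at most
# eps * (high - low). A hedged numpy sketch (helper name hypothetical, and
# not necessarily how st.true_mean_confidence_interval_by_dkwm computes its
# possibly tighter interval):
def _true_mean_interval_sketch(samples, low, high, error_rate):
  eps = np.sqrt(-np.log(error_rate / 2.) / (2. * len(samples)))
  center = np.mean(samples)
  return center - eps * (high - low), center + eps * (high - low)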
def test_true_mean_confidence_interval_by_dkwm_one_sample(self, dtype):
  rng = np.random.RandomState(seed=0)
  num_samples = 5000
  # 5000 samples is chosen to be enough to find discrepancies of
  # size 0.1 or more with assurance 1e-6, as confirmed here:
  d = st.min_discrepancy_of_true_means_detectable_by_dkwm(
      num_samples, 0., 1., false_fail_rate=1e-6, false_pass_rate=1e-6)
  d = self.evaluate(d)
  self.assertLess(d, 0.1)
  # Test that the confidence interval computed for the mean includes
  # 0.5 and excludes 0.4 and 0.6.
  samples = rng.uniform(size=num_samples).astype(dtype=dtype)
  (low, high) = st.true_mean_confidence_interval_by_dkwm(
      samples, 0., 1., error_rate=1e-6)
  low, high = self.evaluate([low, high])
  self.assertGreater(low, 0.4)
  self.assertLess(low, 0.5)
  self.assertGreater(high, 0.5)
  self.assertLess(high, 0.6)
def _testSampleLogProbExact(
    self, concentrations, det_bounds, dim, means,
    num_samples=int(1e5), target_discrepancy=0.1, seed=42):
  # For test methodology see the comment in
  # _testSampleConsistentLogProbInterval, except that this test
  # checks those parameter settings where the true volume is known
  # analytically.
  concentration = np.array(concentrations, dtype=np.float32)
  det_bounds = np.array(det_bounds, dtype=np.float32)
  means = np.array(means, dtype=np.float32)
  testee_lkj = tfd.LKJ(
      dimension=dim, concentration=concentration, validate_args=True)
  x = testee_lkj.sample(num_samples, seed=seed)
  importance_weights = (
      tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds))
  importance_maxima = (1. / det_bounds)**(concentration - 1) * tf.exp(
      testee_lkj._log_normalization())
  chk1 = st.assert_true_mean_equal_by_dkwm(
      importance_weights, low=0., high=importance_maxima, expected=means,
      false_fail_rate=1e-6)
  chk2 = tf.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=importance_maxima,
          false_fail_rate=1e-6, false_pass_rate=1e-6),
      target_discrepancy)
  self.evaluate([chk1, chk2])
def _testSampleConsistentLogProbInterval(
    self, concentrations, det_bounds, dim, num_samples=int(1e5),
    dtype=np.float32, false_fail_rate=1e-6, target_discrepancy=0.1, seed=42):
  # Consider the set M of dim x dim correlation matrices whose
  # determinant exceeds some bound (rationale for bound forthwith).
  # - This is a (convex!) shape in dim * (dim - 1) / 2 dimensions
  #   (because a correlation matrix is determined by its lower
  #   triangle, and the main diagonal is all 1s).
  # - Further, M is contained entirely in the [-1,1] cube,
  #   because no correlation can fall outside that interval.
  #
  # We have two different ways to estimate the volume of M:
  # - Importance sampling from the LKJ distribution
  # - Importance sampling from the uniform distribution on the cube
  #
  # This test checks that these two methods agree.  However, because
  # the uniform proposal leads to many rejections (thus slowness),
  # those volumes are computed offline and the confidence intervals
  # are presented to this test procedure in the "volume_bounds"
  # table.
  #
  # Why place a lower bound on the determinant?  Because for eta > 1,
  # the density of LKJ approaches 0 as the determinant approaches 0.
  # However, the test methodology requires an upper bound on the
  # importance weights produced.  Rejecting matrices with too-small
  # determinant (from both methods) allows me to supply that bound.
  #
  # I considered several alternative regions whose volume I might
  # know analytically (without having to do rejection).
  # - Option a: Some hypersphere guaranteed to be contained inside M.
  #   - Con: I don't know a priori how to find a radius for it.
  #   - Con: I still need a lower bound on the determinants that appear
  #     in this sphere, and I don't know how to compute it.
  # - Option b: Some trapezoid given as the convex hull of the
  #   nearly-extreme correlation matrices (i.e., those that partition
  #   the variables into two strongly anti-correlated groups).
  #   - Con: Would have to dig up n-d convex hull code to implement this.
  #   - Con: Need to compute the volume of that convex hull.
  #   - Con: Need a bound on the determinants of the matrices in that hull.
  # - Option c: Same thing, but with the matrices that make a single pair
  #   of variables strongly correlated (or anti-correlated), and leaves
  #   the others uncorrelated.
  #   - Same cons, except that there is a determinant bound (which
  #     felt pretty loose).
  lows = [dtype(volume_bounds[dim][db][0]) for db in det_bounds]
  highs = [dtype(volume_bounds[dim][db][1]) for db in det_bounds]
  concentration = np.array(concentrations, dtype=dtype)
  det_bounds = np.array(det_bounds, dtype=dtype)
  # Due to possible numerical inaccuracies while lower bounding the
  # determinant, the maximum of the importance weights may exceed the
  # theoretical maximum (importance_maxima).  We add a tolerance to guard
  # against this.  An alternative would have been to add a threshold while
  # filtering in _det_ok_mask, but that would affect the mean as well.
  high_tolerance = 1e-6
  testee_lkj = tfd.LKJ(
      dimension=dim, concentration=concentration, validate_args=True)
  x = testee_lkj.sample(num_samples, seed=seed)
  importance_weights = (
      tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds))
  importance_maxima = (1. / det_bounds)**(concentration - 1) * tf.exp(
      testee_lkj._log_normalization())
  check1 = st.assert_true_mean_in_interval_by_dkwm(
      samples=importance_weights, low=0.,
      high=importance_maxima + high_tolerance,
      expected_low=lows, expected_high=highs,
      false_fail_rate=false_fail_rate)
  check2 = tf.assert_less(
      st.min_discrepancy_of_true_means_detectable_by_dkwm(
          num_samples, low=0., high=importance_maxima + high_tolerance,
          false_fail_rate=false_fail_rate, false_pass_rate=false_fail_rate),
      dtype(target_discrepancy))
  self.evaluate([check1, check2])
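# Why the importance_maxima bound above holds: the LKJ density is
# p(R) = det(R)**(eta - 1) / Z, so on the retained set
# {det(R) >= det_bound} each importance weight obeys
#   1 / p(R) = Z / det(R)**(eta - 1) <= Z * (1 / det_bound)**(eta - 1),
# which is exactly the expression computed for importance_maxima.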