def testNdtriDynamicShape(self):
  """Checks ndtri against scipy when the input's shape is dynamic."""
  probs = np.linspace(0., 1., 50).astype(np.float32)
  # `shape=None` erases the static shape, so the op must handle a
  # dynamically shaped input.
  probs_dynamic = tf1.placeholder_with_default(probs, shape=None)
  expected = sp_special.ndtri(probs)
  actual = self.evaluate(special_math.ndtri(probs_dynamic))
  self.assertAllClose(expected, actual, atol=0.)
def testParamTensorFromProbs(self):
  """A probs-built ProbitBernoulli reports consistent parameters."""
  probs = tf.constant([0.1, 0.5, 0.4])
  dist = tfd.ProbitBernoulli(probs=probs, validate_args=True)
  # probits_parameter() should agree with ndtri(P[X = 1]).
  expected_probits, actual_probits = self.evaluate(
      [special_math.ndtri(dist.prob(1.)), dist.probits_parameter()])
  self.assertAllClose(expected_probits, actual_probits, atol=0, rtol=1e-4)
  # probs_parameter() should agree with P[X = 1].
  expected_probs, actual_probs = self.evaluate(
      [dist.prob(1.), dist.probs_parameter()])
  self.assertAllClose(expected_probs, actual_probs, atol=0, rtol=1e-4)
def testNdtri(self):
  """Checks ndtri against scipy, including piecewise-boundary inputs."""
  grid = np.linspace(0., 1., 50).astype(np.float64)
  # ndtri is a piecewise rational approximation; append values at the
  # boundaries between pieces so every branch is exercised.
  boundary_values = [
      np.exp(-32), 1. - np.exp(-32), np.exp(-2), 1. - np.exp(-2)
  ]
  p = np.hstack((grid, boundary_values))
  expected_x = sp_special.ndtri(p)
  self.assertAllClose(expected_x, self.evaluate(special_math.ndtri(p)),
                      atol=0.)
def testNdtri(self):
  """Verifies that ndtri computation is correct against scipy."""
  # Use skipTest rather than a bare `return`: a silently-passing test hides
  # the fact that the scipy comparison never ran.
  if not special:
    self.skipTest('scipy is not available')
  p = np.linspace(0., 1.0, 50).astype(np.float64)
  # Quantile performs piecewise rational approximation so adding some
  # special input values to make sure we hit all the pieces.
  p = np.hstack((p, np.exp(-32), 1. - np.exp(-32),
                 np.exp(-2), 1. - np.exp(-2)))
  expected_x = special.ndtri(p)
  x = special_math.ndtri(p)
  self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testNdtriDynamicShape(self):
  """Verifies ndtri is correct when fed through a dynamic placeholder."""
  # Check availability *before* opening a session (the original opened a
  # cached session first), and skip rather than silently pass so a missing
  # scipy is visible in the test report.
  if not special:
    self.skipTest('scipy is not available')
  with self.cached_session() as sess:
    p = tf.placeholder(np.float32)  # No static shape: exercises dynamic path.
    p_ = np.linspace(0., 1.0, 50).astype(np.float32)
    x = special_math.ndtri(p)
    x_ = sess.run(x, feed_dict={p: p_})
    expected_x_ = special.ndtri(p_)
    self.assertAllClose(expected_x_, x_, atol=0.)
def _baseNdtriFiniteGradientTest(self, dtype):
  """Verifies that ndtri has finite gradients at interesting points."""
  # Probe 0, 1, and the boundaries between the rational-approximation pieces.
  interesting_points = np.array([
      0.,
      np.exp(-32.),
      np.exp(-2.),
      1. - np.exp(-2.),
      1. - np.exp(-32.),
      1.,
  ]).astype(dtype)
  p = tf.constant(interesting_points)
  # Not having the lambda sanitizer means we'd get an `IndexError` whenever
  # the user supplied function has default args, hence the wrapper.
  _, grads = _value_and_gradient(lambda x: special_math.ndtri(x), p)  # pylint: disable=unnecessary-lambda
  self.assertAllFinite(self.evaluate(grads[0]))
def _baseNdtriFiniteGradientTest(self, dtype):
  """Verifies that ndtri has finite gradients at interesting points."""
  # Gradients are checked at 0, 1, and each piecewise-approximation boundary.
  eps_tiny = np.exp(-32.)
  eps_small = np.exp(-2.)
  boundary_points = [0., eps_tiny, eps_small,
                     1. - eps_small, 1. - eps_tiny, 1.]
  p = tf.constant(np.array(boundary_points).astype(dtype))
  # Not having the lambda sanitizer means we'd get an `IndexError` whenever
  # the user supplied function has default args, hence the wrapper.
  _, grads = _value_and_gradient(
      lambda x: special_math.ndtri(x), p)  # pylint: disable=unnecessary-lambda
  self.assertAllFinite(self.evaluate(grads[0]))
def _inverse(self, y):
  """Maps `y` back through the standard-normal quantile function (ndtri)."""
  validity_assertions = self._assertions(y)
  # Gate the computation on the validity checks so they run first.
  with tf.control_dependencies(validity_assertions):
    return special_math.ndtri(y)
def _quantile(self, p):
  """Returns the p-quantile: un-standardizes the standard-normal quantile."""
  standard_quantile = special_math.ndtri(p)
  return self._inv_z(standard_quantile)
def _quantile(self, p):
  """Quantile of the distribution: loc + scale * Phi^{-1}(p)."""
  standard_quantile = special_math.ndtri(p)
  return self.loc + standard_quantile * self.scale
def _probits_parameter_no_checks(self):
  """Returns probits, derived from probs when not stored directly."""
  if self._probits is not None:
    return tf.identity(self._probits)
  # Only probs were supplied; map them through the normal quantile.
  return special_math.ndtri(tf.convert_to_tensor(self._probs))
def _inverse(self, y):
  """Validates `y`, then applies the standard-normal quantile function."""
  checked_y = self._maybe_assert_valid_y(y)
  return special_math.ndtri(checked_y)
def norm_qdf(x):
  """Standard-normal quantile (inverse CDF) of `x`."""
  quantiles = special_math.ndtri(x)
  return quantiles