def testNdtri(self):
  """Verifies that ndtri computation is correct."""
  if not special:
    return

  p = np.linspace(0., 1.0, 50).astype(np.float64)
  # Quantile performs piecewise rational approximation so adding some
  # special input values to make sure we hit all the pieces.
  p = np.hstack((p, np.exp(-32), 1. - np.exp(-32),
                 np.exp(-2), 1. - np.exp(-2)))
  expected_x = special.ndtri(p)
  x = special_math.ndtri(p)
  self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
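# A standalone sketch (SciPy-only, an illustrative addition rather than part
# of the test suite) of why the extra values above matter: Cephes-style ndtri
# implementations switch rational approximations near p = exp(-2) and
# p = 1 - exp(-2), so probing both sides of each breakpoint exercises every
# piece and checks the approximation stays continuous across them.
import numpy as np
from scipy import special

eps = 1e-12
for p0 in (np.exp(-2.), 1. - np.exp(-2.)):
  left, right = special.ndtri(p0 - eps), special.ndtri(p0 + eps)
  assert np.isclose(left, right, atol=1e-6)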
def testNdtriDynamicShape(self):
  """Verifies that ndtri computation is correct."""
  with self.test_session() as sess:
    if not special:
      return

    p = array_ops.placeholder(np.float32)
    p_ = np.linspace(0., 1.0, 50).astype(np.float32)
    x = special_math.ndtri(p)
    x_ = sess.run(x, feed_dict={p: p_})
    expected_x_ = special.ndtri(p_)
    self.assertAllClose(expected_x_, x_, atol=0.)
def testNdtri(self):
  """Verifies that ndtri computation is correct."""
  with self.test_session():
    if not special:
      return

    p = np.linspace(0., 1.0, 50).astype(np.float64)
    # Quantile performs piecewise rational approximation so adding some
    # special input values to make sure we hit all the pieces.
    p = np.hstack((p, np.exp(-32), 1. - np.exp(-32),
                   np.exp(-2), 1. - np.exp(-2)))
    expected_x = special.ndtri(p)
    x = special_math.ndtri(p)
    self.assertAllClose(expected_x, x.eval(), atol=0.)
def _baseNdtriFiniteGradientTest(self, dtype):
  """Verifies that ndtri has finite gradients at interesting points."""
  g = ops.Graph()
  with g.as_default():
    # Tests gradients at 0, 1, and piece-wise boundaries.
    p = variables.Variable(
        np.array([0.,
                  np.exp(-32.), np.exp(-2.),
                  1. - np.exp(-2.), 1. - np.exp(-32.),
                  1.]).astype(dtype))
    value = special_math.ndtri(p)
    grads = gradients_impl.gradients(value, p)
    with self.test_session(graph=g):
      variables.global_variables_initializer().run()
      self.assertAllFinite(grads[0])
def _baseNdtriFiniteGradientTest(self, dtype):
  """Verifies that ndtri has finite gradients at interesting points."""
  # Tests gradients at 0, 1, and piece-wise boundaries.
  p = constant_op.constant(
      np.array([
          0.,
          np.exp(-32.),
          np.exp(-2.),
          1. - np.exp(-2.),
          1. - np.exp(-32.),
          1.,
      ]).astype(dtype))
  # Not having the lambda sanitizer means we'd get an `IndexError` whenever
  # the user supplied function has default args.
  _, grads = _value_and_gradient(
      lambda x: special_math.ndtri(x), p)  # pylint: disable=unnecessary-lambda
  self.assertAllFinite(self.evaluate(grads[0]))
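# A minimal sketch (SciPy-only, no TF gradient machinery) of the quantity the
# finiteness assertion above is guarding: since ndtri inverts the normal CDF,
# d/dp ndtri(p) = 1 / phi(ndtri(p)) = sqrt(2*pi) * exp(ndtri(p)**2 / 2), which
# is finite on the interior of (0, 1) but diverges at the endpoints p = 0 and
# p = 1, exactly the points the test probes.
import numpy as np
from scipy import special

def ndtri_grad(p):
  x = special.ndtri(p)
  return np.sqrt(2. * np.pi) * np.exp(0.5 * x ** 2)

p = np.array([np.exp(-32.), np.exp(-2.), 0.5, 1. - np.exp(-2.)])
print(ndtri_grad(p))  # all finite at these interior points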
def probit(x):
  return self.evaluate(special_math.ndtri(x))
def probit(x, sess=sess):
  return self.evaluate(special_math.ndtri(x))
def _quantile(self, p):
  return self._inv_z(special_math.ndtri(p))
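# An illustrative sketch of the location-scale pattern _quantile implements:
# if X = loc + scale * Z with Z standard normal, then the quantile of X is
# _inv_z(ndtri(p)) where _inv_z(z) = loc + scale * z. The `loc`/`scale` names
# below are hypothetical stand-ins, not the distribution class's attributes.
import numpy as np
from scipy import special

def quantile(p, loc=0., scale=1.):
  return loc + scale * special.ndtri(p)

assert np.isclose(quantile(0.975), 1.959964, atol=1e-5)  # familiar z-value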
def probit(x, sess=sess):
  return sess.run(special_math.ndtri(x))
def example_integrand(xarr, **kwargs):
  """Asian options test function"""
  sum1 = tf.reduce_sum(ndtri(xarr), axis=1)
  a = S0 * tf.exp((r - sigma2 / 2) + sigma * sqrtdt * sum1)
  arg = 1 / d * tf.reduce_sum(a)
  return e * tf.maximum(zero, arg - K)
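# A NumPy/SciPy sketch (illustrative shapes, not the integrand's actual
# parameters) of the inverse-transform step the integrand relies on: ndtri
# maps uniform draws on (0, 1) to standard normal draws, so reducing over
# axis 1 accumulates the Gaussian increments of each simulated path.
import numpy as np
from scipy import special

rng = np.random.default_rng(0)
xarr = rng.uniform(size=(1000, 16))  # 1000 paths, 16 time steps
normals = special.ndtri(xarr)        # standard normal increments
sum1 = normals.sum(axis=1)           # per-path accumulated increment
print(sum1.mean(), sum1.var())       # approximately 0 and 16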
def erfinv(x):
  return special_math.ndtri((x + 1.) / 2.0) / tf.sqrt(2.)
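# A quick SciPy check (illustrative, not part of the original snippet) of the
# identity behind this helper: Phi(z) = (1 + erf(z / sqrt(2))) / 2, hence
# erfinv(x) = ndtri((x + 1) / 2) / sqrt(2).
import numpy as np
from scipy import special

x = np.linspace(-0.99, 0.99, 7)
lhs = special.ndtri((x + 1.) / 2.) / np.sqrt(2.)
np.testing.assert_allclose(lhs, special.erfinv(x), atol=1e-12)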
def probit(x):
  return special_math.ndtri(x)