Example #1
def gen_probs(n, a, b):
    r"""
    Generate the vector of probabilities for the Beta-binomial
    (n, a, b) distribution.

    The Beta-binomial distribution takes the form

    .. math::
        p(k \,|\, n, a, b) =
        {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)},
        \qquad k = 0, \ldots, n

    Parameters
    ----------
    n : scalar(int)
        First parameter to the Beta-binomial distribution
    a : scalar(float)
        Second parameter to the Beta-binomial distribution
    b : scalar(float)
        Third parameter to the Beta-binomial distribution

    Returns
    -------
    probs: array_like(float)
        Vector of probabilities over k

    """
    probs = np.zeros(n+1)
    for k in range(n+1):
        probs[k] = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)
    return probs
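A minimal usage sketch for the snippet above (the imports are assumptions, since the example relies on np, binom, and beta from its enclosing module); the probabilities over k = 0, ..., n should sum to 1:

import numpy as np
from scipy.special import binom, beta

probs = gen_probs(n=10, a=2.0, b=3.0)
print(probs.sum())  # prints a value very close to 1.0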
Example #2
def beta_binomial(k, n, a, b, multi_precission=False):
	"""
	Beta-binomial pmf: the probability of k successes in n trials, where the
	success probability follows a Beta distribution with parameters a and b.
	Supports multiprecision floating-point output.
	Parameters
	----------
	k : int, ndarray
		Successes.
	n : int, ndarray
		Trials.
	a, b : int, ndarray
		Parameters of the beta distribution.
	multi_precission : bool, optional
		Whether to use multiprecision floating-point output (default: False).
	Returns
	-------
	p : float, ndarray
		Probability of k successes in n trials.
	Examples
	--------
	>>> n = 80000
	>>> k = 40000
	>>> mp_comb(n, k)
	7.0802212521852e+24079
	"""
	if multi_precission:
		import mpmath as mp
		p = mp_comb(n,k) * mp.beta(k+a, n-k+b) / mp.beta(a,b)
	else:
		from scipy.special import beta
		from scipy.special import comb  # scipy.misc.comb was removed in SciPy 1.3
		p = comb(n,k) * beta(k+a, n-k+b) / beta(a,b)
	return p
Example #3
    def pdf(self):
        r"""
        Generate the vector of probabilities for the Beta-binomial
        (n, a, b) distribution.

        The Beta-binomial distribution takes the form

        .. math::
            p(k \,|\, n, a, b) =
            {n \choose k} \frac{B(k + a, n - k + b)}{B(a, b)},
            \qquad k = 0, \ldots, n,

        where :math:`B` is the beta function.

        Parameters
        ----------
        n : scalar(int)
            First parameter to the Beta-binomial distribution
        a : scalar(float)
            Second parameter to the Beta-binomial distribution
        b : scalar(float)
            Third parameter to the Beta-binomial distribution

        Returns
        -------
        probs: array_like(float)
            Vector of probabilities over k

        """
        n, a, b = self.n, self.a, self.b
        k = np.arange(n + 1)
        probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)
        return probs
Example #4
    def probability_of_n_purchases_up_to_time(self, t, n):
        """
        Compute the probability of

        P( N(t) = n | model )

        where N(t) is the number of repeat purchases a customer makes in t units of time.
        """

        r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

        first_term = (
            special.beta(a, b + n)
            / special.beta(a, b)
            * special.gamma(r + n)
            / special.gamma(r)
            / special.gamma(n + 1)
            * (alpha / (alpha + t)) ** r
            * (t / (alpha + t)) ** n
        )
        if n > 0:
            finite_sum = np.sum(
                [
                    special.gamma(r + j) / special.gamma(r) / special.gamma(j + 1) * (t / (alpha + t)) ** j
                    for j in range(0, n)
                ]
            )
            second_term = (
                special.beta(a + 1, b + n - 1) / special.beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)
            )
        else:
            second_term = 0
        return first_term + second_term
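The method above encodes what appears to be the BG/NBD probability P(N(t) = n | model). A standalone sketch with hypothetical parameter values (r, alpha, a, b, t below are assumptions) can sanity-check that the probabilities sum to 1 over n, truncating the sum while the gamma terms stay within float range:

import numpy as np
from scipy import special

r, alpha, a, b, t = 0.25, 4.0, 0.8, 2.5, 10.0  # hypothetical fitted values

def p_n_purchases(n):
    # Same algebra as the method above, with the parameters inlined
    first = (special.beta(a, b + n) / special.beta(a, b)
             * special.gamma(r + n) / special.gamma(r) / special.gamma(n + 1)
             * (alpha / (alpha + t)) ** r * (t / (alpha + t)) ** n)
    if n == 0:
        return first
    finite_sum = sum(special.gamma(r + j) / special.gamma(r) / special.gamma(j + 1)
                     * (t / (alpha + t)) ** j for j in range(n))
    second = (special.beta(a + 1, b + n - 1) / special.beta(a, b)
              * (1 - (alpha / (alpha + t)) ** r * finite_sum))
    return first + second

print(sum(p_n_purchases(n) for n in range(120)))  # very close to 1.0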
Example #5
def BetaBinomial_pdf(n, a, b):
    ''' Beta-binomial pmf over k = 0, ..., n.
    '''
    k = np.arange(n + 1)

    probs = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)

    return probs
Example #6
    def __init__(self, a=1.5, b=1.03, p=.2, q=.2, T=1):

        self.p, self.q = p, q
        # Scaling
        self.a = 100 * a / T**.5
        # Martingale restriction
        self.b = np.ones_like(b) * ss.beta(p, q) \
            / ss.beta(p+1/self.a, q-1/self.a)
Example #7
    def deflect(self):
        # Offsets from the centroid and the scaled radius xi = r / a
        dx = self.image_x - self.centroid[0]
        dy = self.image_y - self.centroid[1]
        r = np.sqrt(dx**2 + dy**2)
        xi = r / self.a
        alpha = 2 * self.kappa_0 * self.a / xi * (
            sp.beta((self.n_outer - 3) / 2.0, (3 - self.gamma) / 2.0)
            - sp.beta((self.n_outer - 3) / 2.0, 3.0 / 2.0)
            * (1 + xi**2) ** ((3 - self.n_outer) / 2.0)
            * sp.hyp2f1((self.n_outer - 3) / 2.0, self.gamma / 2.0,
                        self.n_outer / 2.0, 1 / (1 + xi**2)))

        # Project the deflection magnitude onto x and y
        self.alpha_x = alpha * dx / r
        self.alpha_y = alpha * dy / r
Example #8
def beta_kl_divergence(a, b, aa, bb, samples=1000):
    '''
    Calculate the Kullback-Leibler divergence between two beta distributions,
    P = Beta(a, b) and Q = Beta(aa, bb):

    $KL(P || Q) = \int_{-\infty}^{\infty} P \ln \frac{P}{Q}$
    '''
    # Closed form: ln(B(aa, bb)/B(a, b)) + (a - aa)psi(a) + (b - bb)psi(b)
    #              + (aa - a + bb - b)psi(a + b)
    return math.log(beta(aa, bb)/beta(a, b)) + (a - aa) * psi(a) + (b - bb) * psi(b) + (aa - a + bb - b) * psi(a+b)
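A quick Monte Carlo cross-check of the closed form (note that scipy.stats.beta below is the distribution object, distinct from scipy.special.beta used above; the parameter values are arbitrary):

import numpy as np
from scipy import stats

a, b, aa, bb = 2.0, 3.0, 4.0, 1.5
x = stats.beta(a, b).rvs(200000, random_state=0)
mc = np.mean(stats.beta(a, b).logpdf(x) - stats.beta(aa, bb).logpdf(x))
# mc should agree with beta_kl_divergence(a, b, aa, bb) to about two decimals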
Example #9
    def test_reduce(self):
        phi1 = self.phi1.copy()
        phi1.reduce([('x', 1)])
        reduced_pdf1 = lambda y: (np.power(1, 1) * np.power(y, 2))/beta(1, y)
        self.assertEqual(phi1.scope(), ['y'])
        for inp in np.random.rand(4):
            self.assertEqual(phi1.pdf(inp), reduced_pdf1(inp))
            self.assertEqual(phi1.pdf(y=inp), reduced_pdf1(inp))

        phi1 = self.phi1.reduce([('x', 1)], inplace=False)
        self.assertEqual(phi1.scope(), ['y'])
        for inp in np.random.rand(4):
            self.assertEqual(phi1.pdf(inp), reduced_pdf1(inp))
            self.assertEqual(phi1.pdf(y=inp), reduced_pdf1(inp))

        phi2 = self.phi2.copy()
        phi2.reduce([('x2', 7.213)])
        reduced_pdf2 = lambda x1: multivariate_normal.pdf([x1, 7.213], [0, 0], [[1, 0], [0, 1]])
        self.assertEqual(phi2.scope(), ['x1'])
        for inp in np.random.rand(4):
            self.assertEqual(phi2.pdf(inp), reduced_pdf2(inp))
            self.assertEqual(phi2.pdf(x1=inp), reduced_pdf2(inp))

        phi2 = self.phi2.reduce([('x2', 7.213)], inplace=False)
        self.assertEqual(phi2.scope(), ['x1'])
        for inp in np.random.rand(4):
            self.assertEqual(phi2.pdf(inp), reduced_pdf2(inp))
            self.assertEqual(phi2.pdf(x1=inp), reduced_pdf2(inp))

        phi3 = self.phi3.copy()
        phi3.reduce([('y', 0.112), ('z', 23)])
        reduced_pdf4 = lambda x: 23*(np.power(x, 1)*np.power(0.112, 2))/beta(x, 0.112)
        self.assertEqual(phi3.scope(), ['x'])
        for inp in np.random.rand(4):
            self.assertEqual(phi3.pdf(inp), reduced_pdf4(inp))
            self.assertEqual(phi3.pdf(x=inp), reduced_pdf4(inp))

        phi3 = self.phi3.copy()
        phi3.reduce([('y', 0.112)])
        reduced_pdf3 = lambda x, z: z*(np.power(x, 1)*np.power(0.112, 2))/beta(x, 0.112)
        self.assertEqual(phi3.scope(), ['x', 'z'])
        for inp in np.random.rand(4, 2):
            self.assertEqual(phi3.pdf(inp[0], inp[1]), reduced_pdf3(inp[0], inp[1]))
            self.assertEqual(phi3.pdf(x=inp[0], z=inp[1]), reduced_pdf3(inp[0], inp[1]))

        phi3 = self.phi3.reduce([('y', 0.112)], inplace=False)
        self.assertEqual(phi3.scope(), ['x', 'z'])
        for inp in np.random.rand(4, 2):
            self.assertEqual(phi3.pdf(inp[0], inp[1]), reduced_pdf3(inp[0], inp[1]))
            self.assertEqual(phi3.pdf(x=inp[0], z=inp[1]), reduced_pdf3(inp[0], inp[1]))
            self.assertEqual(phi3.pdf(inp[0], z=inp[1]), reduced_pdf3(inp[0], inp[1]))

        phi3 = self.phi3.reduce([('y', 0.112), ('z', 23)], inplace=False)
        self.assertEqual(phi3.scope(), ['x'])
        for inp in np.random.rand(4):
            self.assertEqual(phi3.pdf(inp), reduced_pdf4(inp))
            self.assertEqual(phi3.pdf(x=inp), reduced_pdf4(inp))
Example #10
def gen_probs(n, a, b):
    """
    Generate and return the vector of probabilities for the Beta-binomial 
    (n, a, b) distribution.
    """
    probs = np.zeros(n + 1)
    for k in range(n + 1):
        probs[k] = binom(n, k) * beta(k + a, n - k + b) / beta(a, b)
    return probs
Example #11
    def _get_bayesian_prob(self):
        alpha_a = 1 + self.success_count["A"]
        beta_a = 1 + self.failure_count["A"]
        alpha_b = 1 + self.success_count["B"]
        beta_b = 1 + self.failure_count["B"]

        return sum(
            beta(alpha_a + i, beta_b + beta_a) * 1.0 /
            ((beta_b + i) * beta(1 + i, beta_b) * beta(alpha_a, beta_a))
            for i in range(alpha_b)
        )
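This matches the closed form for Pr(p_B > p_A) under independent Beta posteriors. A standalone cross-check with hypothetical counts (a sketch; small counts keep the direct beta-function form numerically safe):

import numpy as np
from scipy.special import beta

alpha_a, beta_a = 1 + 12, 1 + 88   # A: 12 successes, 88 failures (assumed)
alpha_b, beta_b = 1 + 16, 1 + 84   # B: 16 successes, 84 failures (assumed)

exact = sum(beta(alpha_a + i, beta_b + beta_a) /
            ((beta_b + i) * beta(1 + i, beta_b) * beta(alpha_a, beta_a))
            for i in range(alpha_b))
rng = np.random.default_rng(0)
mc = np.mean(rng.beta(alpha_b, beta_b, 100000) > rng.beta(alpha_a, beta_a, 100000))
print(exact, mc)  # the two estimates of Pr(p_B > p_A) agree to ~2 decimals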
Example #12
def tukeylambda_kurtosis(lam):
    """Kurtosis of the Tukey Lambda distribution.

    Parameters
    ----------
    lam : array_like
        The lambda values at which to compute the kurtosis.

    Returns
    -------
    k : ndarray
        The kurtosis.  For lam < -0.25, the kurtosis is not defined, so
        np.nan is returned.  For lam = -0.25, np.inf is returned.

    """
    lam = np.asarray(lam)
    shp = lam.shape
    lam = np.atleast_1d(lam).astype(np.float64)

    # For absolute values of lam less than threshold, use the Pade
    # approximation.
    threshold = 0.055

    # Use masks to implement the conditional evaluation of the kurtosis.
    # lambda < -0.25:  kurtosis = nan
    low_mask = lam < -0.25
    # lambda == -0.25: kurtosis = inf
    negqrtr_mask = lam == -0.25
    # lambda near 0:  use Pade approximation
    small_mask = np.abs(lam) < threshold
    # else the "regular" case:  use the explicit formula.
    reg_mask = ~(low_mask | negqrtr_mask | small_mask)

    # Get the 'lam' values for the cases where they are needed.
    small = lam[small_mask]
    reg = lam[reg_mask]

    # Compute the function for each case.
    k = np.empty_like(lam)
    k[low_mask] = np.nan
    k[negqrtr_mask] = np.inf
    if small.size > 0:
        k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
    if reg.size > 0:
        numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
                 3 * beta(2 * reg + 1, 2 * reg + 1))
        denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2
        k[reg_mask] = numer / denom - 3

    # The return value will be a numpy array; resetting the shape ensures that
    # if `lam` was a scalar, the return value is a 0-d array.
    k.shape = shp
    return k
Example #13
    def __init__(self, param, data):
        """Initialize the model.

        """
        super().__init__(param, data)
        [param_a, param_p, param_c] = self.param
        self.param_p = param_p / 100
        self.param_a = param_a * 100 / self.data['maturity']**(.5 - param_c)
        self.param_q = .03
        self.param_b = (scp.beta(self.param_p, self.param_q) /
                        scp.beta(self.param_p+1/self.param_a,
                                 self.param_q-1/self.param_a))
Example #14
    def __init__(self, a=1.5, b=1.03, p=.2, q=.2, c=.0, T=1):
        #super(ClassName, self).__init__()
        self.N = np.max([np.array(a).size, np.array(b).size,
                         np.array(p).size, np.array(q).size,
                         np.array(T).size, np.array(c).size])
        self.p = p / 100
        self.q = q / 100
        # Scaling
        self.a = 100 * a / T**(.5 - c)
        # Martingale restriction
        self.b = np.ones_like(b) * ss.beta(p, q) \
            / ss.beta(p+1/self.a, q-1/self.a)
Example #15
def log_likelihood(params):
	a = params[0]
	b = params[1]
	likelihood = 0.
	for i in range(len(x)):
		choose = float(factorial(n)) / (factorial(x[i]) * factorial(n - x[i]))
		print(">>>>>>>>>>choose is :", choose)
		pi = (choose * special.beta(a + x[i], n + b - x[i]) / special.beta(a, b)) * y[i] / np.sum(y)
		print(">>>>>>>>>>>>each pi is:", pi)
		likelihood += pi
	# print(likelihood)
	# return -np.sum(np.log(likelihood))
	return -1. * likelihood
Example #16
def beta_binomial(k, n, a, b):
    """The pmf/pdf of the Beta-binomial distribution.

    Computation based on beta function.

    See: http://en.wikipedia.org/wiki/Beta-binomial_distribution
    and http://mathworld.wolfram.com/BetaBinomialDistribution.html
    
    k = a vector of non-negative integers <= n
    n = an integer
    a = an array of non-negative real numbers
    b = an array of non-negative real numbers
    """
    return (comb(n, k) * beta(k+a, n-k+b) / beta(a,b)).prod(0)
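Since SciPy 1.4, scipy.stats.betabinom provides this pmf directly, so the snippet can be checked against it (assuming the comb/beta imports the snippet relies on; k is passed as a row so the .prod(0) over the parameter axis is a no-op):

import numpy as np
from scipy.stats import betabinom

n, a, b = 10, 2.0, 3.0
k = np.arange(n + 1)
ours = beta_binomial(k[None, :], n, a, b)
ref = betabinom(n, a, b).pmf(k)
print(np.allclose(ours, ref))  # True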
Example #17
def binomial(n, k):
    """ Binomial coefficient

               n!
    c =    ---------
           (n-k)! k!

    Parameters
    ----------
    n : float
       n of (n, k)
    k : float
       k of (n, k)

    Returns
    -------
    c : float

    Examples
    --------
    First 3 values of the 4th row of Pascal's triangle

    >>> [binomial(4, k) for k in range(3)]
    [1.0, 4.0, 6.0]
    """
    if n <= k or n == 0:
        return 0.
    elif k == 0:
        return 1.
    return 1./(beta(n-k+1, k+1)*(n+1))
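A sanity check of the identity C(n, k) = 1 / ((n + 1) B(n - k + 1, k + 1)) against math.comb (a sketch, assuming beta is imported from scipy.special as the snippet expects; the guard above returns 0. when k >= n, so only k < n is exercised):

import math

for n, k in [(4, 0), (4, 1), (4, 2), (10, 3), (20, 7)]:
    assert abs(binomial(n, k) - math.comb(n, k)) < 1e-6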
Example #18
def betaPrior(x, alpha, beta, left, right):
	Beta = sp.beta(alpha, beta)
	pf = np.zeros(len(x))	
	ylog = ( (1.0-alpha-beta) * np.log(right-left) - (sp.gammaln(alpha) + sp.gammaln(beta) - sp.gammaln(alpha+beta)) +
		(alpha-1.0) * np.log(x - left) + (beta-1.0) * np.log(right - x) )
	pf = np.exp(ylog)
	return pf
Example #19
def ibetam(a, b, x):
    """
    Incomplete beta function defined as the Mathematica Beta[x, a, b]:
    Beta[x, a, b] = Integral[t^(a - 1) * (1 - t)^(b - 1), {t, 0, x}]
    This routine only works for (0 < x < 1) & (b > 0) as required by JAM.

    """
    # V1.0: Michele Cappellari, Oxford, 01/APR/2008
    # V2.0: Use Hypergeometric function for negative a or b.
    #    From equation (6.6.8) of Abramowitz & Stegun (1964)
    #    MC, Oxford, 04/APR/2008
    # V3.0: Use recurrence relation of equation (26.5.16)
    #    from Abramowitz & Stegun (1964) for (a < 0) & (b > 0).
    #    See the online book here http://www.nr.com/aands/
    #    After suggestion by Gary Mamon. MC, Oxford, 16/APR/2009

    a = a + 3e-7  # Perturb to avoid singularities in gamma and betainc
    if np.all(a > 0):
        ib = special.betainc(a, b, x)
    else:
        p = int(np.ceil(np.abs(np.min(a))))
        tot = np.zeros((x.size, a.size))
        for j in range(p):  # Do NOT use gamma recurrence relation to avoid instabilities
            tot += special.gamma(j + b + a)/special.gamma(j + 1 + a)*x**(j + a)
        ib = tot*(1 - x)**b/special.gamma(b) + special.betainc(a + p, b, x)

    return ib*special.beta(a, b)
Example #20
def init_gam(y, x, c=c):

    """Initial guess for the gamma function: uniform distribution."""

    out = y**(c - 1) * (x - y)**(c - 1) / (x**(2*c - 1)) / beta(c, c)
    out[y > x] = 0

    return out
Example #21
def ppi(b):

    # parameters for geophys prior from Martins and Stedinger p. 740
    if abs(b) < 0.5:
        pp, qq = 6, 9
        return ((0.5 + b) ** (pp - 1) * (0.5 - b) ** (qq - 1)) / beta(pp, qq)
    else:
        return 0
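The geophysical prior is a Beta(6, 9) density shifted to (-0.5, 0.5), so it should integrate to 1; a quick quadrature check (assuming beta is scipy.special.beta, as in the snippet):

from scipy.integrate import quad

total, _ = quad(ppi, -0.5, 0.5)
print(total)  # prints a value very close to 1.0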
Example #22
    def probability_of_n_purchases_up_to_time(self, t, n):
        """
        Compute the probability of

        P( N(t) = n | model )

        where N(t) is the number of repeat purchases a customer makes in t units of time.
        """

        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
        _j = np.arange(0, n)

        first_term = (beta(a, b + n + 1) / beta(a, b)
                      * gamma(r + n) / gamma(r) / gamma(n + 1)
                      * (alpha / (alpha + t)) ** r * (t / (alpha + t)) ** n)
        finite_sum = (gamma(r + _j) / gamma(r) / gamma(_j + 1)
                      * (t / (alpha + t)) ** _j).sum()
        second_term = (beta(a + 1, b + n) / beta(a, b)
                       * (1 - (alpha / (alpha + t)) ** r * finite_sum))

        return first_term + second_term
Example #23
def betaAvgPrior(x, alpha, beta, left, right):
	Beta = sp.beta(alpha, beta)
	pf = np.zeros(len(x))
	for i in range(len(x)):
		ylog = ( (1.0-alpha-beta) * np.log(right-left) - (sp.gammaln(alpha) + sp.gammaln(beta) - sp.gammaln(alpha+beta)) +
			(alpha-1.0) * np.log(x[i] - left) + (beta-1.0) * np.log(right - x[i]) )		
		pf[i] = np.mean(np.exp(ylog))
	return pf
Example #24
def Dbeta(a=1.5,b=2.5): # the beta distribution
  return Distr(
    name='beta[a={0},b={1}]'.format(a,b),
    dom=(0.,1.), domv=(1.e-10,1.-1.e-10),
    mean=a/(a+b), std=sqrt(a*b/(a+b+1))/(a+b),
    pdf=lambda x: x**(a-1.)*(1.-x)**(b-1.)/beta(a,b),
    cdf=lambda x: betainc(a,b,x),
  )
Example #25
def tukeylambda_variance(lam):
    """Variance of the Tukey Lambda distribution.

    Parameters
    ----------
    lam : array_like
        The lambda values at which to compute the variance.

    Returns
    -------
    v : ndarray
        The variance.  For lam < -0.5, the variance is not defined, so
        np.nan is returned.  For lam = 0.5, np.inf is returned.

    Notes
    -----
    In an interval around lambda=0, this function uses the [4,4] Pade
    approximation to compute the variance.  Otherwise it uses the standard
    formula (http://en.wikipedia.org/wiki/Tukey_lambda_distribution).  The
    Pade approximation is used because the standard formula has a removable
    discontinuity at lambda = 0, and does not produce accurate numerical
    results near lambda = 0.
    """
    lam = np.asarray(lam)
    shp = lam.shape
    lam = np.atleast_1d(lam).astype(np.float64)

    # For absolute values of lam less than threshold, use the Pade
    # approximation.
    threshold = 0.075

    # Play games with masks to implement the conditional evaluation of
    # the distribution.
    # lambda < -0.5:  var = nan
    low_mask = lam < -0.5
    # lambda == -0.5: var = inf
    neghalf_mask = lam == -0.5
    # abs(lambda) < threshold:  use Pade approximation
    small_mask = np.abs(lam) < threshold
    # else the "regular" case:  use the explicit formula.
    reg_mask = ~(low_mask | neghalf_mask | small_mask)

    # Get the 'lam' values for the cases where they are needed.
    small = lam[small_mask]
    reg = lam[reg_mask]

    # Compute the function for each case.
    v = np.empty_like(lam)
    v[low_mask] = np.nan
    v[neghalf_mask] = np.inf
    if small.size > 0:
        # Use the Pade approximation near lambda = 0.
        v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
    if reg.size > 0:
        v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
                                      beta(reg + 1, reg + 1))
    v.shape = shp
    return v
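A usage sketch comparing against scipy.stats.tukeylambda (lambda values are kept away from 0 because the Pade helpers _tukeylambda_var_p/_tukeylambda_var_q are not shown here):

import numpy as np
from scipy import stats

for lam in [0.1, 0.5, 1.0, 2.0]:
    assert np.isclose(tukeylambda_variance(lam), stats.tukeylambda(lam).var())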
Example #26
def _sp_subvector_error_out_of_range(radius, dimensions, subdimensions):
    dist = SubvectorLength(dimensions, subdimensions)
    sq_r = radius * radius

    normalization = 1.0 - dist.cdf(radius)
    b = (dimensions - subdimensions) / 2.0
    aligned_integral = beta(subdimensions / 2.0 + 1.0, b) * (1.0 - betainc(
        subdimensions / 2.0 + 1.0, b, sq_r))
    cross_integral = beta((subdimensions + 1) / 2.0, b) * (1.0 - betainc(
        (subdimensions + 1) / 2.0, b, sq_r))

    numerator = (sq_r * normalization + (
        aligned_integral - 2.0 * radius * cross_integral) / beta(
        subdimensions / 2.0, b))
    with np.errstate(invalid='ignore'):
        return np.where(
            numerator > np.finfo(float).eps,  # np.MachAr was removed in NumPy 1.24
            numerator / normalization, np.zeros_like(normalization))
Example #27
    def gb2_density(self, arg):
        """Density of the return.

        """
        return (self.param_a * arg**(self.param_a*self.param_p-1) /
                (self.param_b**(self.param_a*self.param_p) *
                scp.beta(self.param_p, self.param_q) *
                (1 + (arg / self.param_b) ** self.param_a) **
                (self.param_p+self.param_q)))
Example #28
def BetaDistribution (xs, alpha=0, beta=1):
	ys = []
	for x in xs:
		# 1/B(a,b) * x^(a-1) * (1-x)^(b-1)
		y = (x ** (alpha-1)) * ((1-x) ** (beta-1))
		#BetaInv = mt.factorial(alpha+beta-1) / ( mt.factorial(alpha-1)*mt.factorial(beta-1) )
		BetaInv = 1.0 / sp.beta(alpha,beta)
		#print y, FuncBeta
		y *= BetaInv
		ys.append(y)
	return ys
Example #29
def binomial_conf_interval(x, n, conf=0.95):
    if n == 0:
        left = random.random() * (1 - conf)
        return left, left + conf
    b = special.beta(x + 1, n - x + 1)

    def f(left_a):
        left = max(1e-8, special.betaincinv(x + 1, n - x + 1, left_a))
        right = min(1 - 1e-8, special.betaincinv(x + 1, n - x + 1, left_a + conf))
        top = (right**(x + 1) * (1 - right)**(n - x + 1) * left * (1 - left)
               - left**(x + 1) * (1 - left)**(n - x + 1) * right * (1 - right))
        bottom = ((x - n * right) * left * (1 - left)
                  - (x - n * left) * right * (1 - right))
        return top / bottom / b

    # find_root is assumed to be defined elsewhere in the source module
    left_a = find_root(f, (1 - conf) / 2, bounds=(0, 1 - conf))
    return (special.betaincinv(x + 1, n - x + 1, left_a),
            special.betaincinv(x + 1, n - x + 1, left_a + conf))
Example #30
def gam(y, x, a=a, b=b):

    """True post-fragmentation density distribution
       used for data generation."""

    out = y**(a - 1) * (np.abs(x - y)**(b - 1)) / (x**(a + b - 1)) / beta(a, b)

    if type(x) == np.ndarray or type(y) == np.ndarray:
        out[y > x] = 0
        out[np.isnan(out)] = 0
        out[np.isinf(out)] = 0

    return out
Example #31
a_prior, b_prior = 1, 1

Y = stats.bernoulli(0.7).rvs(20)

N1, N0 = Y.sum(), len(Y) - Y.sum()

a_post = a_prior + N1
b_post = b_prior + N0

prior_pred_dist, post_pred_dist = [], []
N = 20

for k in range(N + 1):
    post_pred_dist.append(
        comb(N, k) * beta(k + a_post, N - k + b_post) / beta(a_post, b_post))
    prior_pred_dist.append(
        comb(N, k) * beta(k + a_prior, N - k + b_prior) /
        beta(a_prior, b_prior))

fig, ax = plt.subplots()
ax.bar(np.arange(N + 1), prior_pred_dist, align='center', color='grey')
ax.set_title(f"Prior predictive distribution", fontweight='bold')
ax.set_xlim(-1, 21)
ax.set_xticks(list(range(N + 1)))
ax.set_xticklabels(list(range(N + 1)))
ax.set_ylim(0, 0.15)
ax.set_xlabel("number of success")

fig, ax = plt.subplots()
ax.bar(np.arange(N + 1), post_pred_dist, align='center', color='grey')
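The script appears to break off mid-figure; a plausible completion that mirrors the prior-predictive plot above (the title and axis settings below are assumptions):

ax.set_title("Posterior predictive distribution", fontweight='bold')  # assumed
ax.set_xlim(-1, 21)
ax.set_xticks(list(range(N + 1)))
ax.set_xticklabels(list(range(N + 1)))
ax.set_xlabel("number of successes")
plt.show()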
Example #32
 def pdf(self, F, y, Y_metadata=None):
     eF = safe_exp(F)
     pdf = (y**(eF[:, 0] - 1)) * (
         (1 - y)**(eF[:, 1] - 1)) / beta(eF[:, 0], eF[:, 1])
     return pdf
Example #33
 def cox_snell(x):
     return (special.betainc(2 / 3., 2 / 3., x) *
             special.beta(2 / 3., 2 / 3.))
Example #34
 def pdf(a, b, c, aa, bb, x):
     # Same expression as before, assembled in steps for readability
     z = math.pow(x / bb, aa)
     return (c / sp.beta(a / c, b) * aa / bb
             * math.pow(x / bb, a * aa - 1)
             * math.pow(1 + z, -a - 1)
             * math.pow(1 - math.pow(1 - 1 / (1 + z), c), b - 1))
Example #35
import numpy as np
x = np.array([-2, -1, 0, 1, 2])  # assumed sample input; x was undefined in the original
abs(x)
np.absolute(x)

theta=np.linspace(0, np.pi, 3)
print(theta, np.sin(theta), np.cos(theta), np.tan(theta))
x=[-1,0,1]
print(x, np.arcsin(x), np.arccos(x), np.arctan(x))
x=[0,0.001,0.01,0.1]
np.expm1(x)
np.log1p(x)

from scipy import special
x=[1,5,10]
special.gamma(x)
special.gammaln(x)
special.beta(x,2)

x=np.arange(5)
y=np.empty(5)
np.multiply(x,10,out=y)
y
y=np.zeros(10)
np.power(2,x,out=y[::2])
y

x=np.arange(1,6)
np.add.reduce(x)
np.multiply.reduce(x)
np.add.accumulate(x)
np.multiply.accumulate(x)
np.multiply.outer(x,x)
Example #36
 def _pdf(self, x, c):
     return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
Example #37
 def _mom(self, k, a, b):
     return special.beta(a + k, b) / special.beta(a, b)
def expect(d, N):
    return N * beta(1 + 1. / d, N)
def _kumaraswamy_moment(a, b, n):
    a = np.asarray(a)
    b = np.asarray(b)
    return b * sp_special.beta(1.0 + n / a, b)
def Sigma_Exact(x):
    # Parameters of the Hypergeometric
    a, b, c = 0.5, 2.0, 2.5
    y = pow((np.sqrt(x + 1) - np.sqrt(x)), 4)
    return sc.beta(2, 0.5) * sc.hyp2f1(a, b, c, y) / x
def Sigma_Large_pt(x):
    # Asymptotic behavior of the FO cross section at low pt
    return sc.beta(2, 0.5) / x
Example #42
 def _munp(self, n, c):
     return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
Example #43
 def _cdf_skip(self, x, c):
     # error in special.hyp2f1 for some values; see tickets 758, 759
     return 0.5 + x/special.beta(0.5,c/2.0)* \
            special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
Example #44
 def _pdf(self, x, a, b):
     return x**(a-1)*(1-x)**(b-1)/ \
         special.beta(a, b)
Example #45
 def pdf(self, f, g, y, Y_metadata=None):
     ef = np.exp(f)
     eg = np.exp(g)
     pdf = y**(ef-1) * (1-y)**(eg-1) / beta(ef, eg)
     return pdf
Example #46
#!/usr/bin/python3
## file: rejection_beta.py
import numpy as np
import numpy.random as nprd
import scipy.special as scisp
# Set the parameters
alpha = 4
beta = 2
# Beta distribution density function
f=lambda x: 1/scisp.beta(alpha,beta)* \
    x**(alpha-1) * (1-x)**(beta-1)
# Maximum of the density, attained at the mode (alpha-1)/(alpha+beta-2)
M = f((1 - alpha) / (2 - alpha - beta))
print(M)
# Draw two uniform samples of 500 points each: one on (0, 1), one on (0, M)
N = 500
x = nprd.random(N)
u = nprd.random(N) * M
# Keep the x for which u falls below the beta density (the acceptance step)
accepted = [i for i in range(N) if u[i] <= f(x[i])]
rand_beta = x[accepted]  # the accepted Beta-distributed draws
rand_U_selected = u[accepted]
# Plotting
import matplotlib.pyplot as plt
# Figure size and axis ranges
plt.rcParams['figure.figsize'] = (8.0, 5.0)
plt.xlim(0, 1)
plt.ylim(0, M + 0.1)
# Horizontal and vertical reference lines
x_grid = np.linspace(0, 1, 100)  # 100 evenly spaced points on (0, 1)
y_M = np.ones(100) * M
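The plotting section also breaks off after computing y_M; a plausible continuation (assumed) that overlays the density, the envelope, and the accepted draws:

plt.plot(x_grid, f(x_grid), label='Beta(4, 2) density')   # assumed plot calls
plt.plot(x_grid, y_M, '--', label='envelope M')
plt.scatter(rand_beta, rand_U_selected, s=8, label='accepted draws')
plt.legend()
plt.show()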
Example #47
 def _sf(self, x, alpha):
     return x * special.beta(x, alpha + 1)
Example #48
 def beta_distribution(x, a, b):
     return (1.0 / ss.beta(a,b)) * x**(a-1) * (1-x)**(b-1)
Example #49
 def _pmf(self, x, alpha):
     return alpha * special.beta(x, alpha + 1)
Example #50
 def _cdf(self, x, alpha):
     return 1 - x * special.beta(x, alpha + 1)
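For context, these last snippets (_sf, _pmf, _cdf) match the Yule-Simon distribution, whose pmf is alpha * B(k, alpha + 1) for k = 1, 2, ...; a quick check that the pmf sums to 1 (a sketch with an arbitrary alpha):

import numpy as np
from scipy.special import beta

alpha = 2.0
k = np.arange(1, 200000)
print(np.sum(alpha * beta(k, alpha + 1)))  # very close to 1; the tail decays like k**-(alpha + 1)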