Example #1
import numpy as np
import mpmath as mpm
from scipy.special import zeta  # Hurwitz zeta

def zeta_minmax(gamma, kmin, kmax):
    '''Truncated Hurwitz zeta sum; kmax == None means kmax --> infty.
    '''
    if gamma <= 1.0:
        if kmax is None:
            print('ERROR: Series does not converge!!!')
            C = 0
        else:
            mpm.mp.dps = 25
            # Euler-Maclaurin summation of the finite sum over [kmin, kmax]
            C = float(mpm.sumem(lambda k: k**(-gamma), [kmin, kmax]))
    else:
        if isinstance(kmax, (list, np.ndarray)):
            # note: zeta(gamma, kmin) - zeta(gamma, kmax) sums k in [kmin, kmax-1]
            C = zeta(gamma, kmin) - zeta(gamma, kmax)
        elif kmax is None:
            C = zeta(gamma, kmin)
        else:
            C = zeta(gamma, kmin) - zeta(gamma, kmax)
    return C
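A minimal sanity sketch, assuming numpy and scipy as imported above: the zeta-difference form matches a direct partial sum over [kmin, kmax-1].

import numpy as np
from scipy.special import zeta

gamma, kmin, kmax = 2.5, 2, 100
direct = np.sum(np.arange(kmin, kmax, dtype=float)**(-gamma))
print(direct, zeta(gamma, kmin) - zeta(gamma, kmax))  # the two values agree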
Example #2
def power_law_likelihoods(data, alpha, xmin, xmax=False, discrete=False):
    from numpy import tile
    from sys import float_info
    if alpha < 0:
        return tile(10**float_info.min_10_exp, len(data))

    data = data[data >= xmin]
    if xmax:
        data = data[data <= xmax]

    if not discrete:
        # continuous pdf: x**-alpha divided by the integral of x**-alpha
        # from xmin to infinity (an xmax cut above is not reflected here)
        likelihoods = (data**-alpha) / \
                (xmin**(1 - alpha) / (alpha - 1))
    else:
        if alpha < 1:
            return tile(10**float_info.min_10_exp, len(data))
        from scipy.special import zeta
        if not xmax:
            likelihoods = (data**-alpha) / \
                    zeta(alpha, xmin)
        else:
            likelihoods = (data**-alpha) / \
                    (zeta(alpha, xmin) - zeta(alpha, xmax + 1))
    likelihoods[likelihoods == 0] = 10**float_info.min_10_exp
    return likelihoods
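A minimal normalization sketch, assuming numpy and scipy: in the discrete branch, k**-alpha / zeta(alpha, xmin) is a pmf, so its sum over the support should be close to 1.

import numpy as np
from scipy.special import zeta

alpha, xmin = 2.5, 1
k = np.arange(xmin, 100000, dtype=float)
print((k**-alpha / zeta(alpha, xmin)).sum())  # ~= 1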
Example #3
import scipy.special as spec

def b4(nf):
    a1 = (8157455/16) + (621885/2)*spec.zeta(3) - (88209/2)*spec.zeta(4) - 288090*spec.zeta(5)
    a2 = nf*(-(336460813/1944) - (4811164/81)*spec.zeta(3) + (33935/6)*spec.zeta(4) + (1358995/27)*spec.zeta(5))
    a3 = (nf**2)*((25960913/1944) + (698531/81)*spec.zeta(3) - (10526/9)*spec.zeta(4) - (381760/81)*spec.zeta(5))
    a4 = (nf**3)*(-(630559/5832) - (48722/243)*spec.zeta(3) + (1618/27)*spec.zeta(4) + (460/9)*spec.zeta(5))
    a5 = (nf**4)*((1205/2916) - (152/81)*spec.zeta(3))
    return a1 + a2 + a3 + a4 + a5
Example #4
    def EtaB(self):
        #Define fixed quantities for BEs
        epstt = np.real(self.epsilon1ab(2, 2))
        epsmm = np.real(self.epsilon1ab(1, 1))
        epsee = np.real(self.epsilon1ab(0, 0))
        Ti = 100 * self.M1  # initial temperature, 100 times the N1 mass
        rRadi = np.pi**2 * self.ipol_gstar(Ti) / 30. * Ti**4  # initial radiation energy density rho_RAD = pi^2 * gstar(T[0])/30 * T^4
        y0 = np.array([0., Ti, 0.])
        nphi = (2. * zeta(3) / np.pi**2) * Ti**3
        params = np.array([epstt, epsmm, epsee, np.real(rRadi), 0.])
        aflog10 = 5.0
        t1 = np.linspace(0., aflog10, num=1000, endpoint=True)

        # solve equation
        ys = odeint(self.RHS, y0, t1, args=tuple(params))
        # functions for converting to etaB using the solution to find temp
        T = ys[:, 1]
        gstarSrec = self.ipol_gstarS(0.3e-9)  # d.o.f. at recombination
        gstarSoff = self.ipol_gstarS(
            T[-1])  # d.o.f. at the end of leptogenesis
        SMspl = 28. / 79.
        zeta3 = zeta(3)
        ggamma = 2.
        coeffNgamma = ggamma * zeta3 / np.pi**2
        Ngamma = coeffNgamma * (10**t1 * T)**3
        coeffsph = SMspl * gstarSrec / gstarSoff
        self.ys = np.empty((len(T), 4))
        self.ys[:, 0] = t1
        self.ys[:, 1] = T
        self.ys[:, 2] = ys[:, 2]
        self.ys[:, -1] = coeffsph * (ys[:, 2]) * nphi / Ngamma
        return self.ys[-1][-1]
Example #5
    def run(self, sim, delta):
        if self.stop_interval == 1:
            a = self.a
            c = self.c
            if self.b is None:
                b = (math.log(zeta(2 * a / c, 1)) - math.log(delta)) * c / 2
            else:
                b = self.b
        else:
            a = self.a
            c = self.stop_interval
            if self.b is None:
                b = (math.log(zeta(2 * a, 1)) - math.log(delta)) / 2
            else:
                b = self.b

        next_stop = 1
        total = 0.0  # running sum; avoids shadowing the built-in `sum`
        n = 0.0
        while True:
            total += sim.sim()
            n += 1.0
            boundary = math.sqrt(a * n * math.log(math.log(n, c) + 1) + b * n) * self.ratio
            if self.stop_interval != 1:
                if n < next_stop:
                    continue
                while next_stop < n + 1:
                    next_stop *= c
            if total >= boundary:
                return True
            elif total <= -boundary:
                return False
Example #6
    def Z(self, gamma):
        """
        Partition function Z for discrete and continuous powerlaw distributions.

        Parameters
        ----------
        gamma: (float)
            exponent guess.

        Returns
        -------
        s:
            Partition value.

        """

        if self.discrete:  # when the powerlaw is discrete
            if np.isfinite(self.xmax):  # xmax is finite:
                # Calculate zeta from xmin to infinity and subtract zeta from
                # xmax to infinity to get zeta from xmin to xmax.
                s = zeta(gamma, self.xmin) - zeta(gamma, self.xmax)
            else:
                # If xmax is infinity, simply calculate zeta from xmin to infinity.
                s = zeta(gamma, self.xmin)
        else:
            # Normalization for the continuous powerlaw:
            # s = (xmax^(1-gamma) - xmin^(1-gamma)) / (1-gamma)
            s = (self.xmax**(-gamma + 1) / (1 - gamma)) - \
                (self.xmin**(-gamma + 1) / (1 - gamma))
        return s
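The continuous branch is the closed form of the normalizing integral of x**-gamma; a short check with scipy.integrate (assumed available):

from scipy.integrate import quad

gamma, xmin, xmax = 2.5, 1.0, 100.0
print(quad(lambda x: x**-gamma, xmin, xmax)[0])
print((xmax**(1 - gamma) - xmin**(1 - gamma)) / (1 - gamma))  # same value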
Example #7
def zeta_minmax(gamma, kmin, kmax):
    '''kmax == None means kmax == infty
    '''
    if gamma <= 1.0:
        if isinstance(kmax, (list, np.ndarray)):
            kmax_max = np.max(kmax)
            # cumulative partial sums of k**-gamma, read off at each kmax
            x = (1.0 * np.arange(kmin, kmax_max + 1, 1))**(-gamma)
            Fx = np.cumsum(x)
            C = []
            for k_ in kmax:
                C.append(Fx[int(k_ - kmin)])
            C = np.array(C)
        elif kmax is None:
            print('ERROR: Series does not converge!!!')
            C = 0
        else:
            mpm.mp.dps = 25
            C = float(
                mpm.zeta(gamma, float(kmin)) - mpm.zeta(gamma, float(kmax)))
    else:
        if isinstance(kmax, (list, np.ndarray)):
            C = zeta(gamma, kmin) - zeta(gamma, kmax)
        elif kmax is None:
            C = zeta(gamma, kmin)
        else:
            C = zeta(gamma, kmin) - zeta(gamma, kmax)
    return C
Example #8
def get_alpha(xCCDF, yCCDF, xmin, N):
    """
    Fit a discrete power-law to an empirical CCDF by chi-square
    minimization over the exponent alpha.
    """

    bins = xCCDF
    n = len(xCCDF)
    a = 0
    best_chi = 50000

    for alpha in np.linspace(1.5, 6., 5001):
        # theoretical CCDF of the discrete power law: zeta(alpha, x) / zeta(alpha, xmin)
        Theoretical_CCDF = zeta(alpha, bins) / zeta(alpha, xmin)

        chi = np.sum(
            (yCCDF * N - Theoretical_CCDF * N)**2 / (Theoretical_CCDF * N))

        if chi < best_chi:
            best_chi = chi
            a = alpha

    # convert the best chi-square statistic into a goodness-of-fit probability
    best_chi = stats.chi2.cdf(best_chi, n - 1)
    alpha = a
    Theoretical_CCDF = zeta(alpha, bins) / zeta(alpha, xmin)

    return [1 - best_chi, alpha, Theoretical_CCDF]
Example #9
def r_M(alpha):
    if (alpha == -1):
        return 216 * np.log(2) * zeta(3) / np.pi**4
    A = 2**(alpha + 1)
    r = (((4 * A**2 - 5 * A + 1) *
          (alpha + 3) * zeta(alpha + 2) * zeta(alpha + 4)) /
         ((2 * A - 1)**2 * (alpha + 2) * (zeta(alpha + 3))**2))
    return r
Example #10
def ks_dist_d(data, threshold):
    # Estimate the exponent given the current threshold
    alpha, sd = mle_alpha_d(data, threshold)
    # Construct the CDF of the discrete power law on {threshold, threshold+1, ...}:
    # P(X <= x) = 1 - zeta(alpha, floor(x) + 1) / zeta(alpha, threshold)
    cdf = lambda x: 1.0 - zeta(alpha, np.floor(x) + 1) / zeta(alpha, threshold)
    # Return the output of the out-of-the-box Kolmogorov-Smirnov test:
    # the infinity norm of the difference between the distribution functions.
    d, pv = kstest([v for v in data if v >= threshold], cdf)
    return (d, pv), (alpha, sd)
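A hypothetical usage sketch with synthetic Zipf samples; mle_alpha_d (defined in Example #44 below) and the imports are assumed:

import numpy as np
from scipy.stats import zipf

data = zipf.rvs(2.5, size=1000, random_state=0)
(d, pv), (alpha, sd) = ks_dist_d(data, threshold=1)
print(alpha)  # should land near 2.5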
Example #11
 def skewness(rho):
     z0 = sp.zeta(rho)
     z1 = sp.zeta(rho + 1)
     zn1 = sp.zeta(rho - 1)
     zn2 = sp.zeta(rho - 2)
     if (rho > 3):
         return (2 * z0**3 / z1**3 - 3 * zn1 * z0 / z1**2 +
                 zn2 / z1) / (zn1 / z1 - z0**2 / z1**2)
     return np.inf
Example #12
    def prob(self, params, ranks=None, log=False):
        if ranks is None:
            ranks = self.exog

        alpha, beta = params
        if log:
            return -alpha * lg(beta + ranks) - lg(zeta(alpha, q=beta + 1.))
        else:
            return ((beta + ranks)**(-alpha)) / zeta(alpha, q=beta + 1.)
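The pmf above is the Zipf-Mandelbrot law; a normalization sketch, assuming numpy and scipy:

import numpy as np
from scipy.special import zeta

alpha, beta = 2.0, 1.5
ranks = np.arange(1, 200000, dtype=float)
print(((beta + ranks)**-alpha).sum() / zeta(alpha, q=beta + 1.))  # ~= 1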
Example #13
def reimann_start(alpha, start):

    if start == 1:
        return zeta(alpha)
    elif start < 1:
        print("Zeta called on non positive start value.")
        return 0
    else:
        return zeta(alpha) - reimann_stop(alpha, start - 1)
Example #14
 def kurtosis(rho):
     z0 = sp.zeta(rho)
     z1 = sp.zeta(rho + 1)
     zn1 = sp.zeta(rho - 1)
     zn2 = sp.zeta(rho - 2)
     zn3 = sp.zeta(rho - 3)
     if (rho > 4):
         return (-3 * z0**4 + 6 * zn1 * z1 * z0**2 - 4 * zn2 * z1**2 * z0 +
                 zn3 * z1**3) / (z0**2 - zn1 * z1)**2
     return np.inf
Example #15
def Dhat3(n):
    return (1 / 80) * (
        20 * pow(np.euler_gamma, 4) + 20 * pow(np.euler_gamma * np.pi, 2) +
        3 * pow(np.pi, 4) + 20 *
        ((6 * pow(np.euler_gamma, 2) + pow(np.pi, 2)) *
         pow(sc.polygamma(0, n), 2) +
         4 * np.euler_gamma * pow(sc.polygamma(0, n), 3) +
         pow(sc.polygamma(0, n), 4) + np.euler_gamma *
         (sc.polygamma(2, n) + 8 * sc.zeta(3)) + sc.polygamma(0, n) *
         (4 * pow(np.euler_gamma, 3) + 2 * np.euler_gamma * pow(np.pi, 2) +
          sc.polygamma(2, n) + 8 * sc.zeta(3))))
Example #16
def estimate_parameters(series, min_size_series=50, discrete=False):
    """
    
    Apply Clauset et al.'s method to find the best fit value of xmin and Alpha.

    **Parameters**

        series : series of data to be fit.
        
        min_size_series : Minimum possible size of the distribution to which a power-law fit will be attempted. Fitting a power-law to a very small series would give biased results, where a power-law may appear to be a good fit even when the data are not drawn from a power-law distribution. The default value is 50, as suggested in the paper.

        discrete : Boolean, whether to treat the series as discrete or continuous. Default value is False.

    **Returns**

        Tuple of (Estimated xmin, Estimated Alpha value, minimum KS statistics score).

    """

    sorted_series = sorted(series)
    xmin_candidates = []
    x_prev = sorted_series[0]
    xmin_candidates.append(x_prev)
    for x in sorted_series:
        if (x > x_prev):
            x_prev = x
            xmin_candidates.append(x_prev)

    ks_statistics_min = 100000
    xmin_result = 0
    Alpha_result = 2
    for xmin in xmin_candidates[:-1 * (min_size_series - 1)]:
        data = [x for x in sorted_series if x >= xmin]  # a list, not a one-shot iterator: it is traversed several times below
        estimated_Alpha = estimate_scaling_parameter(data, xmin)
        if (discrete):
            Px = [
                zeta(estimated_Alpha, x) / zeta(estimated_Alpha, xmin)
                for x in unique(data)
            ]
        else:
            Px = [
                pow(float(x) / xmin, 1 - estimated_Alpha) for x in unique(data)
            ]
        n = len(Px)
        Sx = [i[1] / n for i in frequency_distribution(data, pdf=False)]
        ks_statistics = max(
            map(lambda counter: abs(Sx[counter] - Px[counter]), range(0, n)))
        if (ks_statistics < ks_statistics_min):
            ks_statistics_min = ks_statistics
            xmin_result = xmin
            Alpha_result = estimated_Alpha

    return (xmin_result, Alpha_result, ks_statistics_min)
Example #17
def test_zeta():
    value = np.random.rand(1).item() * 5 + 1
    assert (roundScaler(NumCpp.riemann_zeta_Scaler(value), NUM_DECIMALS_ROUND) ==
            roundScaler(sp.zeta(value, 1).item(), NUM_DECIMALS_ROUND))

    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols) * 5 + 1
    cArray.setArray(data)
    assert np.array_equal(roundArray(NumCpp.riemann_zeta_Array(cArray), NUM_DECIMALS_ROUND),
                          roundArray(sp.zeta(data, 1), NUM_DECIMALS_ROUND))
Example #18
 def __init__(self, confidence, stop_interval=1):
     BanditAgent.__init__(self)
     self.confidence = confidence / 2
     self.stop_interval = stop_interval
     if stop_interval != 1:
         self.a = 0.55
         self.c = self.stop_interval
         self.b = (math.log(zeta(2 * self.a, 1)) - math.log(confidence)) / 2
     else:
         self.a = 0.6
         self.c = 1.1
         self.b = (math.log(zeta(2 * self.a / self.c, 1)) - math.log(confidence)) * self.c / 2
Example #19
def D3(n):
    return (1 / 80) * (
        20 * pow(np.euler_gamma, 4) + 20 * pow(np.euler_gamma * np.pi, 2) +
        3 * pow(np.pi, 4) - 20 * sc.polygamma(3, n) + 20 *
        (4 * np.euler_gamma * pow(sc.polygamma(0, n), 3) +
         pow(sc.polygamma(0, n), 4) + pow(sc.polygamma(0, n), 2) *
         (6 * pow(np.euler_gamma, 2) + pow(np.pi, 2) - 6 * sc.polygamma(1, n)) -
         (6 * pow(np.euler_gamma, 2) + pow(np.pi, 2)) * sc.polygamma(1, n) +
         3 * pow(sc.polygamma(1, n), 2) + 4 * np.euler_gamma *
         (sc.polygamma(2, n) + 2 * sc.zeta(3)) + 2 * sc.polygamma(0, n) *
         (2 * pow(np.euler_gamma, 3) + np.euler_gamma * pow(np.pi, 2) -
          6 * np.euler_gamma * sc.polygamma(1, n) + 2 * sc.polygamma(2, n) +
          4 * sc.zeta(3))))
Example #20
 def _cumulative_distribution_function(cell, dataX = None, minX = None, alpha = None):
     if dataX is None:
         dataX = cell.dataX
     if minX is None:
         minX = cell.minX
     if alpha is None:
         alpha = cell.alpha
     from scipy.special import zeta
     total_level = len(dataX)
     power_cdf = np.zeros(total_level)
     for xn in range(total_level):
         power_cdf[xn] = zeta(alpha, dataX[xn]) / zeta(alpha, minX)  # survival function P(X >= x)
     return power_cdf
Example #21
def powerlaw_gamma_MLE_variance(gamma,kmin,N):
  zeta_g_kmin = zeta(gamma,kmin)
  if zeta_g_kmin == 0:
    return np.nan

  # N * (zeta''/zeta - (zeta'/zeta)**2) is the Fisher information of the MLE
  zeta_ratio_1 = hurwitz_2nd_der_appx(gamma,kmin)/zeta_g_kmin
  zeta_ratio_2 = (hurwitz_1st_der_appx(gamma,kmin)/zeta_g_kmin)**2
  var = N*(zeta_ratio_1-zeta_ratio_2)
  if var > 0:
    # despite the function name, this returns the standard error 1/sqrt(var)
    return 1/(np.sqrt(var))
  else:
    return np.nan
Example #22
def get_delta_beta_amp(sigma, gamma):
    """
    Returns power spectrum amplitude for beta fluctuations that
    should achieve a given standard deviation. Assumes l_cutoff=2.

    Args:
        sigma: requested standard deviation.
        gamma: tilt

    Returns:
        Spectral index power spectrum amplitude.
    """
    return 4*np.pi*sigma**2*80**gamma/(-3+2*zeta(-1-gamma)+zeta(-gamma))
Example #23
def b3(nf):
    a1 = (Ca**4)*(150653./486 - (44./9)*spec.zeta(3))
    a2 = dada*(-80./9 + (704./3)*spec.zeta(3))
    a3 = (Ca**3)*Tf*nf*(-39143./81 + (136./3)*spec.zeta(3))
    a4 = (Ca**2)*Cf*Tf*nf*(7073./243 - (653./9)*spec.zeta(3))
    a5 = Ca*(Cf**2)*Tf*nf*(-4204./27 + (352./9)*spec.zeta(3))
    a6 = dfda*nf*(512./9 - (1664./3)*spec.zeta(3))
    a7 = 46*(Cf**3)*Tf*nf
    a8 = ((Ca*Tf*nf)**2)*(7930./81 + (224./9)*spec.zeta(3))
    a9 = ((Ca*Tf*nf)**2)*(1352./27 - (704./9)*spec.zeta(3))
    a10 = Ca*Cf*((Tf*nf)**2)*(17152./243 + (448./9)*spec.zeta(3))
    a11 = dfdf*(nf**2)*(-704./9 + (512./3)*spec.zeta(3))
    a12, a13 = (424./243)*Ca*(Tf*nf)**3, (1232./243)*Cf*(Tf*nf)**3
    return a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 + a10 + a11 + a12 + a13
Example #24
def fit_discrete_power_law_from_distribution(
    x,
    P,
):
    """ Fits a power-law to the given distribution
    by a fake redrawing from the given distribution
    after https://arxiv.org/pdf/0706.1062.pdf .

    Parameters
    ----------
    x : `numpy.array` or :obj:`list` of `float`
        mean values of the bins (for discrete data 
        this is just the values of the discrete data).
    P : `numpy.array` or :obj:`list` of `float` 
        The corresponding weights for bin means given 
        in `x` (`P` does not have to be normalized)

    Returns
    -------
    alpha : `float`
        The exponent of the power law P(x) ~ x^{-alpha}.
    error : `float`
        The standard error of alpha.
    xmin : `float`
        The inferred minimum value of the data.
    """

    x = np.array(x, dtype=float)
    P = np.array(P, dtype=float)

    x_min = x.min()
    n = P.sum()
    sum_ln_x = np.log(x).dot(P)

    negative_likelihood = lambda alpha: n * np.log(zeta(alpha, x_min)
                                                   ) + alpha * sum_ln_x
    minimum = fmin(negative_likelihood, 10, disp=False)
    alpha = minimum[0]
    z = zeta(alpha, x_min)
    z_1 = sum_until_value_threshold(lambda x: -np.log(x + x_min) /
                                    (x + x_min)**alpha,
                                    start_at=x_min,
                                    thresh=1e-6)
    z_2 = sum_until_value_threshold(lambda x: +np.log(x + x_min)**2 /
                                    (x + x_min)**alpha,
                                    start_at=x_min,
                                    thresh=1e-6)
    # standard error = 1 / sqrt(Fisher information), hence the -0.5 power
    err = (n * (z_2 / z - (z_1 / z)**2))**(-0.5)

    return alpha, err, x_min
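A cross-check sketch, assuming numpy and scipy: z, the Hurwitz zeta, equals the direct sum that the sum_until_value_threshold helper approximates term by term.

import numpy as np
from scipy.special import zeta

alpha, x_min = 2.5, 1.0
k = np.arange(x_min, 200000, dtype=float)
print((k**-alpha).sum(), zeta(alpha, x_min))  # nearly equal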
Example #25
 def d2logp_df2(self, F, y, Y_metadata=None):
     eF = safe_exp(F)
     a = eF[:,0, None]
     b = eF[:,1, None]
     a = np.clip(a, 1e-9, 1e9)  # numerical stability
     b = np.clip(b, 1e-9, 1e9)  # numerical stability
     psi_ab = psi(a+b)
     psi_a = psi(a)
     psi_b = psi(b)
     zeta_ab = zeta(2,a+b)  # zeta(2, x) is the trigamma function psi'(x)
     zeta_a = zeta(2,a)
     zeta_b = zeta(2,b)
     d2logp_dfa2 = (psi_ab + (a*zeta_ab) - psi_a - (a*zeta_a) + np.log(y)) * a
     d2logp_dfb2 = (psi_ab + (b*zeta_ab) - psi_b - (b*zeta_b) + np.log(1-y)) * b
     return d2logp_dfa2, d2logp_dfb2
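The identity relied on above, checked in one line (scipy assumed):

from scipy.special import zeta, polygamma

print(zeta(2, 3.7), polygamma(1, 3.7))  # identical values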
Example #26
def power_law_degree(n,power):
    deg = 1 #The smallest degree in the degree sequence
    deg_list=[] #The degree sequence
    # In order to follow a power law, the fraction of times that degree d appears in the degree sequence
    # must be 1/d**power. Since the sum of these fractions must be 1, we normalize each fraction.
    # I.e., d appears in the sequence a fraction of times that is (1/d**power)/Z,
    # where Z = \sum_d 1/d**power. The value of Z is computed by the function zeta in the library scipy.special
    z=zeta(power)
    somma=0
    while len(deg_list) < n:
        p = 1/((deg**power)*z) #The fraction of occurrences of deg in the sequence
        num=math.ceil(p*n) #The number of occurrences of deg in the sequence.
        #We may use the floor or the closest integer. However, be careful:
        #If we use math.floor, then when the fraction is very small the number returned is 0,
        #thus no degree is inserted in the sequence, and the algorithm does not terminate

        # Add deg to the sequence num times, or stop early once the sequence is full
        for i in range(num):
            if len(deg_list) == n:
                break
            deg_list.append(deg)
            somma+=deg
        deg+=1

    # To check that the generated sequence is valid, compute the sum of the inserted degrees
    # (this is done within the while loop above).
    # If the sum is odd, increasing deg_list[0] by one is sufficient to fix the sequence.
    if somma %2 != 0:
        deg_list[0]+=1

    return deg_list
Example #27
def wesd(EVAL1, EVAL2, Vol1, Vol2, d):
    '''Weighted Spectral Distance. See Konukoglu et al. (2012).'''

    if d == 3:
        Ball = 4.0/3*np.pi  # for three dimensions
        p = 2.0
    elif d == 2:
        Ball = np.pi
        p = 1.5

    d = float(d)

    Vol = np.amax((Vol1, Vol2))
    # np.amax(EVAL1[1], EVAL2[1]) would treat the second argument as an axis
    mu = max(EVAL1[1], EVAL2[1])

    C = ((d+2)/(d*4*np.pi**2)*(Ball*Vol)**(2/d) - 1/mu)**p + ((d+2)/(d*4*np.pi**2)*(Ball*Vol/2)**(2/d) - 1/mu*(d/(d+4)))**p

    K = ((d+2)/(d*4*np.pi**2)*(Ball*Vol)**(2/d) - (1/mu)*(d/(d+2.64)))**p

    W = (C + K*(zeta(2*p/d, 1) - 1 - .5**(2*p/d)))**(1/p)

    holder = 0
    for i in range(1, np.amin((len(EVAL1), len(EVAL2)))):
        holder += (np.abs(EVAL1[i] - EVAL2[i])/(EVAL1[i]*EVAL2[i]))**p
    WESD = holder ** (1/p)

    nWESD = WESD/W

    return nWESD
Example #28
 def _cumulative_distribution_function(cell,
                                       dataX=None,
                                       minX=None,
                                       alpha=None):
     if dataX is None:
         dataX = cell.dataX
     if minX is None:
         minX = cell.minX
     if alpha is None:
         alpha = cell.alpha
     from scipy.special import zeta
     total_level = len(dataX)
     power_cdf = np.zeros(total_level)
     for xn in range(total_level):
         power_cdf[xn] = zeta(alpha, dataX[xn]) / zeta(alpha, minX)  # survival function P(X >= x)
     return power_cdf
Example #29
def number_density(T):
    """
    Return the photon number density in m^{-3} of a blackbody at
    kelvin temperature T. This is the number of photons per unit
    volume.
    """
    # n = 16 pi zeta(3) (kT/hc)^3; the cube is what makes the units m^-3
    return 16 * pi * zeta(3, 1) * (k * T / (h * c))**3
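A spot check of the corrected formula at the CMB temperature, with constants taken from scipy (assumed available); the known value is roughly 4.1e8 photons per cubic meter:

from scipy.constants import k, h, c, pi
from scipy.special import zeta

print(16 * pi * zeta(3, 1) * (k * 2.725 / (h * c))**3)  # ~4.1e8 m^-3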
Example #30
 def inverse_zeta(val):
     precision = 1e-5
     x = 1
     # zeta is decreasing on (1, inf), so scan until it drops below val
     # (assumes val > 1; otherwise this loop never terminates)
     while True:
         x += precision
         if zeta(x) < val:
             return x
Example #31
def integrate_black_body(T=2.726, s=4):
    """
    Integrate the black body spectrum over energy,
    multiplied by arbitrary powers of energy.

    Parameters
    ----------
    T: float
        Temperature of black body spectrum in K, optional
    s: integer
        power of energy in numerator, for s = 4 calculates integral over photon energy density
        so that average energy is integrate_black_body(s = 4) / integrate_black_body(s = 3)

    Returns
    -------
    `~astropy.Quantity` with total energy density

    Notes
    -----
    Gamma(s) Zeta(s) = int_0^\infty x^{s - 1} / (exp(x) - 1) dx
    See e.g. https://people.physics.tamu.edu/krisciunas/planck.pdf
    """
    result = (c.k_B * T * u.K).to('eV')**s
    result /= ((c.hbar * c.c).to('eV cm')**3. * np.pi**2.)
    result *= zeta(s, 1) * gamma(s)
    return result
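A numerical check of the Gamma-zeta identity quoted in the docstring (scipy assumed):

import numpy as np
from scipy.integrate import quad
from scipy.special import gamma, zeta

s = 4
val, _ = quad(lambda x: x**(s - 1) / np.expm1(x), 0, np.inf)
print(val, gamma(s) * zeta(s, 1))  # both equal pi^4/15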
Example #32
def mandelbrot_entropy(alpha, beta, dx=1e-10):
    if alpha <= 1.0 or beta <= 1.0:
        raise ValueError("Entropy undefined for the given parameters:\n" +
                         str(alpha) + " and " + str(beta))
    zeta_b = lambda a: zeta(a, beta + 1)
    return alpha * (-derivative(zeta_b, alpha, dx=dx)) / zeta_b(alpha) + lg(
        zeta_b(alpha))
Example #33
def discrete_likelihood_vector(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
    """
    Compute the likelihood for all "scaling parameters" in the range (alpharange)
    for a given xmin.  This is only part of the discrete value likelihood
    maximization problem as described in Clauset et al
    (Equation B.8)

    *alpharange* [ 2-tuple ] 
        Two floats specifying the upper and lower limits of the power law alpha to test
    """
    from scipy.special import zeta
    from numpy import log

    zz = data[data>=xmin]
    nn = len(zz)

    alpha_vector = numpy.linspace(alpharange[0],alpharange[1],n_alpha)
    sum_log_data = numpy.log(zz).sum()

    # alpha_vector is a vector, xmin is a scalar
    zeta_vector = zeta(alpha_vector, xmin)

    L_of_alpha = -1*nn*log(zeta_vector) - alpha_vector * sum_log_data

    return L_of_alpha
Example #34
def _generate_min_degree(gamma, average_degree, max_degree, tolerance, max_iters):
    """Returns a minimum degree from the given average degree."""
    # Defines zeta function whether or not Scipy is available
    try:
        from scipy.special import zeta
    except ImportError:

        def zeta(x, q):
            return _hurwitz_zeta(x, q, tolerance)

    min_deg_top = max_degree
    min_deg_bot = 1
    min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
    itrs = 0
    mid_avg_deg = 0
    while abs(mid_avg_deg - average_degree) > tolerance:
        if itrs > max_iters:
            raise nx.ExceededMaxIterations("Could not match average_degree")
        mid_avg_deg = 0
        for x in range(int(min_deg_mid), max_degree + 1):
            mid_avg_deg += (x ** (-gamma + 1)) / zeta(gamma, min_deg_mid)
        if mid_avg_deg > average_degree:
            min_deg_top = min_deg_mid
            min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
        else:
            min_deg_bot = min_deg_mid
            min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
        itrs += 1
    # return int(min_deg_mid + 0.5)
    return round(min_deg_mid)
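A stand-in sketch of the _hurwitz_zeta fallback, written as a direct truncated sum; this is an assumption about its behavior, not networkx's actual implementation:

def hurwitz_zeta_sum(x, q, tolerance=1e-8):
    # sum (k + q)**-x for k = 0, 1, 2, ... until terms fall below tolerance
    z, k, term = 0.0, 0, 1.0
    while term > tolerance:
        term = (k + q) ** -x
        z += term
        k += 1
    return z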
Example #35
def discrete_likelihood_vector(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
    """
    Compute the likelihood for all "scaling parameters" in the range (alpharange)
    for a given xmin.  This is only part of the discrete value likelihood
    maximization problem as described in Clauset et al
    (Equation B.8)

    *alpharange* [ 2-tuple ]
        Two floats specifying the upper and lower limits of the power law alpha to test
    """
    from scipy.special import zeta
    from numpy import log

    zz = data[data >= xmin]
    nn = len(zz)

    alpha_vector = np.linspace(alpharange[0], alpharange[1], n_alpha)
    sum_log_data = np.log(zz).sum()

    # alpha_vector is a vector, xmin is a scalar
    zeta_vector = zeta(alpha_vector, xmin)

    L_of_alpha = -1 * nn * log(zeta_vector) - alpha_vector * sum_log_data

    return L_of_alpha
Example #36
    def _fit_discrete(self, xmin=1):
        """Fit a discrete power-law to data."""
        # update internal xmin
        self.xmin = xmin

        # clip data to be greater than xmin
        data = self.clipped_data

        # calculate the log sum of the data
        lsum = np.log(data).sum()
        # and length, don't want to recalculate these during optimization
        n = len(data)

        def nll(alpha, xmin):
            """Negative log-likelihood of discrete power law."""
            return n * np.log(zeta(alpha, xmin)) + alpha * lsum

        # find best result
        opt = minimize_scalar(nll, args=(xmin,))
        if not opt.success:
            raise RuntimeWarning("Optimization failed to converge")

        # calculate normalization constant
        alpha = opt.x
        C = 1 / zeta(alpha, xmin)

        # save in object
        self.C = C
        self.alpha = alpha

        return C, alpha
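A standalone sketch of the same optimization without the class machinery (scipy assumed); the synthetic exponent should be recovered approximately:

import numpy as np
from scipy.stats import zipf
from scipy.special import zeta
from scipy.optimize import minimize_scalar

data = zipf.rvs(2.3, size=2000, random_state=1)
lsum, n, xmin = np.log(data).sum(), len(data), 1
opt = minimize_scalar(lambda a: n * np.log(zeta(a, xmin)) + a * lsum,
                      bounds=(1.01, 6), method='bounded')
print(opt.x)  # close to 2.3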
Example #37
def check_A9():
    print("Check Dhat - Dlog in (A.9)")
    rhside = [
        -np.euler_gamma,
        (1 / 12) * (6 * pow(np.euler_gamma, 2) + pow(np.pi, 2)),
        (1 / 6) * (-2 * pow(np.euler_gamma, 3) -
                   np.euler_gamma * pow(np.pi, 2) - 4 * sc.zeta(3)),
        (1 / 4) * (pow(np.euler_gamma, 4) + pow(np.euler_gamma * np.pi, 2) +
                   (3 / 20) * pow(np.pi, 4) + 8 * np.euler_gamma * sc.zeta(3))
    ]
    for k in [0, 1, 2, 3]:
        for n in [2, 5, 20, 50, 100, 1000]:
            lhs = D_general(k, n, "soft1") - Dlog_general(k, n)
            rhs = rhside[k]
            diff = lhs - rhs
            print("A9: Dhat{}({}) - Dlog = {}".format(k, n, diff))
Example #38
 def P(x):
     """Cumulative distribution function."""
     try:
         return zeta(alpha, x) / bottom
     except TypeError as e:
         print(alpha, x, r)
         raise e
Example #39
    def __init__(self, d, alpha, c = None):
        self.num_params = 2*d

        if c is None:
            self.c = zeta(alpha, 1) * 2 + 0.1
        else:
            self.c = c

        self.alpha = alpha
Example #40
def expected_find(n, t, alpha):

    sumP = 0

    denom = special.zeta(alpha, 1)

    for i in range(2, n):
        sumP += math.exp(- math.pow(i, -alpha) / denom * t)

    return int(round(n - sumP - 1))
Example #41
 def _probability_mass_function(cell, dataX = None, minX = None):
     if dataX is None:
         dataX = cell.dataX
     if minX is None:
         minX = cell.minX
     from scipy.special import zeta
     constantN = 1.0 / zeta(cell.alpha, minX)
     xPower = dataX**(-cell.alpha)
     c_xPower = constantN * xPower
     return c_xPower
Example #42
def compute_decay(N, decayexp, mode, gamma, with1=True):
    if mode == 2:
        start = SampleProblem.get_decay_start(decayexp, gamma)
        A = gamma / zeta(decayexp, start)
    elif mode == 1:
        start = 1
        A = gamma / zeta(decayexp, start)
    assert mode == start

    if with1:
        a = np.zeros(N+1)
        a[0] = 1
        for i in range(N):
            m = i + start
            a[i+1] = A * (m**(-decayexp))
    else:
        a = np.zeros(N)
        for i in range(N):
            m = i + start
            a[i] = A * (m**(-decayexp))

    return a
Example #43
def b_l(l, a):
	'''
	Compute the l'th cumulant for the posterior distribution of the log-odds ratio with contingency table a
	
	Reference:
	'A new approximation of the posterior distribution of the log-odds ratio' - Fredette and Angers, 2002
	'''
	a = [int(a_i) for a_i in a]
	C = [1.,-1.,-1.,1.]
	if l == 1:
		return np.sum( [ c * special.psi(a_j) for c, a_j in zip(C, a) ] )
	else:
		return math.factorial(l-1) * np.sum( [ (-1. * c)**l * special.zeta(l, a_j) for c, a_j in zip(C, a) ]  ) 
Example #44
def mle_alpha_d( data, threshold ) :
## Keep the data observations, that we consider to be in the tail
	tail = np.array( [ v for v in data if v >= threshold ] )
## Estimate the mean log of the peaks over threshold
	sum_log = np.sum( np.log( tail ) ) / ( len( tail ) + 0.0 )
## Define minus log-likelihood of the discrete power law
	loglik = lambda alpha : np.log( zeta( alpha, threshold ) ) + alpha * sum_log
## Compute the ML estimate of the exponent, with a view to using it as the
##  initial seed for the numerical minimizer for better convergence.
	alpha_0 = 1.0 + 1.0 / ( sum_log - np.log( threshold ) )
	res = minimize( loglik, ( alpha_0, ), method = 'Nelder-Mead', options = { 'disp': False } )
## Return the "optimal" argument, regardless of its quality. Potentially DANGEROUS!
	return res.x[ 0 ], float( 'nan' )
Example #45
    def setupCF2(cls, functype, amptype, rvtype='uniform', gamma=0.9, decayexp=2, freqscale=1, freqskip=0, N=1, scale=1, dim=2, secondparam=None):
        try:
            rvs = cls.rv_defs[rvtype]
        except KeyError:
            raise ValueError("Unknown RV type %s" % rvtype)

        try:
            func = cls.func_defs[(functype, dim)]
        except KeyError:
            raise ValueError("Unknown function type %s for dim %s" % (functype, dim))

        if amptype == "decay-inf":
            start = SampleProblem.get_decay_start(decayexp, gamma)
            amp = gamma / zeta(decayexp, start)
            ampfunc = lambda i: amp / (float(i) + start) ** decayexp
            logger.info("type is decay_inf with start = " + str(start) + " and amp = " + str(amp))
        elif amptype == "constant": 
            amp = gamma / N
            ampfunc = lambda i: gamma * (i < N)
        else:
            raise ValueError("Unknown amplitude type %s", amptype)

        logger.info("amp function: %s", str([ampfunc(i) for i in range(10)]))
        element = FiniteElement('Lagrange', ufl.triangle, 1)
        # NOTE: the explicit degree of the expression should influence the quadrature order during assembly
        degree = 3

        mis = MultiindexSet.createCompleteOrderSet(dim)
        for i in range(freqskip + 1):
            mis.next()

        a0 = Expression("B", element=element, B=scale)
        if dim == 1:
            a = (Expression(func, freq=freqscale, A=ampfunc(i), B=scale,
                            m=int(mu[0]), degree=degree, element=element) for i, mu in enumerate(mis))
        else:
            a = (Expression(func, freq=freqscale, A=ampfunc(i), B=scale,
                            m=int(mu[0]), n=int(mu[1]),
                            degree=degree, element=element) for i, mu in enumerate(mis))

        if secondparam is not None:
            a0 = (a0, secondparam[0])
            a = ((am, bm) for am, bm in zip(a, secondparam[1]))  # itertools.izip in Python 2
        return ParametricCoefficientField(a0, a, rvs)
Example #46
def discrete_likelihood(data, xmin, alpha):
    """
    Equation B.8 in Clauset

    Given a data set, an xmin value, and an alpha "scaling parameter", computes
    the log-likelihood (the value to be maximized) 
    """
    if not scipyOK:
        raise ImportError("Can't import scipy.  Need scipy for zeta function.")
    from scipy.special import zeta
    from numpy import log

    zz = data[data>=xmin]
    nn = len(zz)

    sum_log_data = numpy.log(zz).sum()

    zeta_val = zeta(alpha, xmin)  # avoid shadowing the imported function

    L_of_alpha = -1*nn*log(zeta_val) - alpha * sum_log_data

    return L_of_alpha
Example #47
def power_law_ks_distance(data, alpha, xmin, xmax=None, discrete=False, kuiper=False):
    """Data must be sorted beforehand!"""
    from numpy import arange, sort, mean
    data = data[data>=xmin]
    if xmax:
        data = data[data<=xmax]
    n = float(len(data))
    if n<2:
        if kuiper:
            return 1, 1, 2
        return 1

    if not all(data[i] <= data[i+1] for i in arange(int(n)-1)):
        data = sort(data)

    if not discrete:
        Actual_CDF = arange(n)/n
        Theoretical_CDF = 1-(data/xmin)**(-alpha+1)

    if discrete:
        from scipy.special import zeta
        if xmax:
            Actual_CDF, bins = cumulative_distribution_function(data,xmin=xmin,xmax=xmax)
            Theoretical_CDF = 1 - ((zeta(alpha, bins) - zeta(alpha, xmax+1)) /\
                    (zeta(alpha, xmin)-zeta(alpha,xmax+1)))
        if not xmax:
            Actual_CDF, bins = cumulative_distribution_function(data,xmin=xmin)
            Theoretical_CDF = 1 - (zeta(alpha, bins) /\
                    zeta(alpha, xmin))

    D_plus = max(Theoretical_CDF-Actual_CDF)
    D_minus = max(Actual_CDF-Theoretical_CDF)
    Kappa = 1 + mean(Theoretical_CDF-Actual_CDF)

    if kuiper:
        return D_plus, D_minus, Kappa

    D = max(D_plus, D_minus)

    return D
Example #48
############################################################
#
#    special functions
#
############################################################

from numpy import *
from scipy import special


# Bessel function of real order v at complex z
print(special.jv(6,2+3j))

# Gamma function
print(special.gamma(5))
print(special.gamma(5.1))

# Hurwitz zeta function
print(special.zeta(10, 2))
Example #49
 def _munp(self, n, a):
     return _lazywhere(
         a > n + 1, (a, n),
         lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
         np.inf)
Example #50
 def _pmf(self, k, a):
     Pk = 1.0 / special.zeta(a, 1) / k**a
     return Pk
Example #51
import sys
import os
import scipy.special as ss
import numpy as np
from scipy.optimize import curve_fit

def func(x,a,b,c):
    z=np.zeros(len(x))
    for i in range (len(x)):
        res = 0 
        for j in range (1, int(x[i])+1):
            res += j**(b)
        z[i] = c-res*a
    return z

dataSRG = np.loadtxt("42part01/interpol.dat")
x = dataSRG[:,0]
y = dataSRG[:,2]



popt, pcov = curve_fit(func, x, y, sigma=None, maxfev=200000, gtol=.00001)
a = popt[2]
b = popt[0]
c = -popt[1]
extrapolated_energy = a-b*ss.zeta(c, 1)
print(extrapolated_energy)
print(a)
print(b)
print(c)
Example #52
fit_shape = 2.31
ax = fig.add_subplot(121)
ax.tick_params(
    axis='both'
    , which='major'
    , labelsize=config.getint('figure', 'tick_fs') )
plt.plot(
    spans
    , np.array(span_counts) / float(len(spread_span.span_values)) 
    , '.'
    , markersize=config.getint('figure', 'marker_size')
    , markeredgewidth=0
    , markerfacecolor=config.get('figure', 'marker_color') )
y = (
        np.ones(len(spans))
        / spspecial.zeta(fit_shape, 1)
        / np.array(spans)**fit_shape )
plt.plot(spans, y, 'k-', markerfacecolor=config.get('figure', 'marker_color'))
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim([0, max(spans)])
ax.set_xlabel(
    'Source Lifespan (days)'
    , fontsize=config.getint('figure', 'axis_fs') )
ax.set_ylabel(
    'Frequency'
    , fontsize=config.getint('figure', 'axis_fs') )
ax.set_title(
    '(a) Video Source Lifespan'
    , fontsize=config.getint('figure', 'title_fs') )
Example #53
def discrete_CCDF(x,alpha,xmin):
    return special.zeta(alpha,x)/special.zeta(alpha,xmin)
Example #54
def discrete_neg_log_likelihood(a,xmin,xs):
    return len(xs)*np.log(special.zeta(a,xmin)) + a*np.sum(np.log(xs))
Example #55
def zeta_(x):
    return zeta(x, 1.)
Example #56
 def _pmf(self, k, a):
     # zipf.pmf(k, a) = 1/(zeta(a) * k**a)
     Pk = 1.0 / special.zeta(a, 1) / k**a
     return Pk
Example #57
def discrete_neg_log_likelihood(a,xmin,xs):
    # strip all zeros from xs (log(0) is undefined), then evaluate
    while True:
        try:
            xs.remove(0)
        except ValueError:
            return float(len(xs)*np.log(special.zeta(a,xmin)) + a*sum([np.log(x) for x in xs]))
Example #58
def discrete_PDF(x,alpha,xmin):
    return 1./((x**alpha)*special.zeta(alpha,xmin))
Example #59
def mu(n, D, mus):
    if mus[n-1] != 0.:
        return mus[n-1]
    else:
        u = 0.
        if D == 3:
            if n == 1:
                u = -sqrt(pi)
            elif n % 2 == 0:
                # integer division: factorial() needs an int argument
                u = float((-1)**(n-2))/float(factorial(n-2,exact=1)) * factorial((n-2)//2,exact=1) * zeta(n,1)
            else:
                u = float((-1)**(n-2))/float(factorial(n-2,exact=1)) * gamma(1.+(n-2.)/2.) * zeta(n,1)
        elif D == 2:
            if n % 2 == 0:
                u = float((-1)**(n-1))/float(factorial(n-1,exact=1)) * gamma(1.+(n-1.)/2.) * (float(1-(2**(n+1)))/sqrt(pi)) * zeta(n+1,1)
            else:
                u = float((-1)**(n-1))/float(factorial(n-1,exact=1)) * factorial((n-1)//2,exact=1) * (float(1-(2**(n+1)))/sqrt(pi)) * zeta(n+1,1)
        mus[n-1] = u
        return u
Example #60
def discrete_CCDF(x,alpha,xmin):
    return float(special.zeta(alpha,x))/special.zeta(alpha,xmin)