def invgammapdf(x, alpha, beta):
    alpha = float(alpha)
    beta = float(beta)
    if not np.isscalar(x):
        return (beta**alpha / math.gamma(alpha)) * \
            np.array([(xi**(-alpha - 1))*math.exp(-beta/xi) for xi in x])
    else:
        return (beta**alpha / math.gamma(alpha))*(x**(-alpha - 1))*math.exp(-beta/x)
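# Hedged sanity check for invgammapdf above (assumes scipy is available):
# scipy.stats.invgamma uses the same parameterization with a=alpha and
# scale=beta, so the two should agree to rounding error.
import numpy as np
from scipy.stats import invgamma

xs = np.array([0.5, 1.0, 2.0])
np.testing.assert_allclose(invgammapdf(xs, 3.0, 2.0),
                           invgamma.pdf(xs, a=3.0, scale=2.0))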
def _calculate_exponential_params(self, moment_1=2, moment_2=4):
    """Calculate Exponential DSD parameters.

    Calculate Exponential DSD parameters using the method of moments. The
    choice of moments is given in the parameters. Uses the method from [1].

    Parameters
    ----------
    moment_1: float
        First moment to use.
    moment_2: float
        Second moment to use.

    References
    ----------
    [1] Zhang et al., 2008, Diagnosing the Intercept Parameter for Exponential
    Raindrop Size Distribution Based on Video Disdrometer Observations: Model
    Development. J. Appl. Meteor. Climatol.,
    https://doi.org/10.1175/2008JAMC1876.1
    """
    m1 = self._calc_mth_moment(moment_1)
    m2 = self._calc_mth_moment(moment_2)

    num = m1 * gamma(moment_2 + 1)
    den = m2 * gamma(moment_1 + 1)

    Lambda = np.power(np.divide(num, den), (1 / (moment_2 - moment_1)))
    N0 = m1 * np.power(Lambda, moment_1 + 1) / gamma(moment_1 + 1)

    return Lambda, N0
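# Hedged standalone check of the method-of-moments inversion above (not the
# class method; N0_true and Lambda_true are illustrative values): for an
# exponential DSD N(D) = N0*exp(-Lambda*D) the n-th moment is
# m_n = N0*Gamma(n+1)/Lambda**(n+1), so the inversion should recover the
# parameters exactly.
import numpy as np
from math import gamma

N0_true, Lambda_true = 8000.0, 2.5
moment = lambda n: N0_true * gamma(n + 1) / Lambda_true**(n + 1)
m1, m2 = moment(2), moment(4)

Lambda = ((m1 * gamma(4 + 1)) / (m2 * gamma(2 + 1)))**(1.0 / (4 - 2))
N0 = m1 * Lambda**(2 + 1) / gamma(2 + 1)
np.testing.assert_allclose([Lambda, N0], [Lambda_true, N0_true])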
def f(d, k):
    delta = float(0.5*(d))  # correction to Reid's formula, which had delta = (d+1)/2
    top1 = float(math.gamma(delta)) * float(math.gamma(2*delta))
    top2 = math.gamma(2*delta + 1)
    top = top1 * (top2 ** (k-1))
    bottom = float((2**(k-1))) * float(k) * float(math.gamma(k * delta)) * float(math.gamma((k+1)*delta))
    return float((top/bottom))
def G_3(d, i_1, i_2, i_3):
    p = G_2(d, i_1, i_2) * 1./(3*(d+1) + 2*(i_1 + i_2 + i_3))
    p *= math.gamma(d + i_3 + 1)
    p /= math.gamma(i_3 + 1)
    p *= math.gamma(d + 2 + i_1 + i_2 + i_3)
    p /= math.gamma(2*d + 2 + i_1 + i_2 + i_3)
    return p
def G_2(d, i_1, i_2):
    p = G_1(d, i_1) * 1./(2*(d + 1 + i_1 + i_2))
    p *= math.gamma(d + i_2 + 1)
    p /= math.gamma(i_2 + 1)
    p *= math.gamma(i_1 + i_2 + (d+3)/2.)
    p /= math.gamma(i_1 + i_2 + (d+1)*(3./2.))
    return p
def variance(r0=None, L0=None, atmosphere=None):
    if atmosphere is not None:
        r0 = atmosphere.r0
        L0 = atmosphere.L0
    L0r0ratio = (L0/r0)**(5./3)
    return (24*math.gamma(6./5)/5.)**(5./6) * \
        (math.gamma(11./6)*math.gamma(5./6)/(2.*math.pi**(8./3)))*L0r0ratio
def get_curve(self):
    # If it is already computed, just return it
    if self.curve is not None:
        return self.curve

    length = 20  # maximum length in sec - see later if this needs to be calculated differently
    k = self.k
    theta = self.theta
    theta_up = self.theta_up

    # This is our time vector (just the length of the gamma atom):
    t = np.linspace(0, length, length*self.fs, endpoint=False)

    # np.vectorize is not really vectorized, it's just a nicer way to loop
    gamma_function_up = np.vectorize(
        lambda tt: 1/(math.gamma(k)*theta_up**k)*tt**(k-1)*math.exp(-tt/theta_up))
    gamma_function_down = np.vectorize(
        lambda tt: 1/(math.gamma(k)*theta**k)*tt**(k-1)*math.exp(-tt/theta))

    gamma_atom_up = gamma_function_up(t)
    gamma_atom_down = gamma_function_down(t)

    # stick them together : )
    gamma_atom_up = gamma_atom_up[:np.argmax(gamma_atom_up)] / np.max(gamma_atom_up)
    gamma_atom_down = gamma_atom_down[np.argmax(gamma_atom_down):] / np.max(gamma_atom_down)
    gamma_atom = np.concatenate((gamma_atom_up, gamma_atom_down))
    gamma_atom /= linalg.norm(gamma_atom)  # in-place division preserves the array and avoids an explicit loop

    return gamma_atom
def calculate(v, x):
    # Approximation of the Boys function for small x
    if x <= 25:
        i = 0
        ans = 0
        while True:
            seq = (gamma(v + 0.5) / gamma(v + i + 1.5)) * x**i
            if seq < 1e-10:
                break
            ans += seq
            i += 1
        ans *= (1/2) * exp(-x)
        return ans
    # Approximation of the Boys function for large x
    elif x > 25:
        i = 0
        ans = 0
        while True:
            seq = (gamma(v + 0.5) / gamma(v - i + 1.5)) * x**(-i)
            if seq < 1e-10:
                break
            ans += seq
            i += 1
        ans *= (1/2) * exp(-x)
        ans = (gamma(v + 0.5) / (2*x**(v + 0.5))) - ans
        return ans
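# Hedged cross-check for the Boys-function approximation above (assumes scipy
# and Python 3 division): the Boys function satisfies
# F_v(x) = Gamma(v + 1/2) * P(v + 1/2, x) / (2 * x**(v + 1/2)), where P is the
# regularized lower incomplete gamma function, scipy.special.gammainc.
from math import gamma, exp
from scipy.special import gammainc

def boys_reference(v, x):
    return gamma(v + 0.5) * gammainc(v + 0.5, x) / (2 * x**(v + 0.5))

assert abs(calculate(0, 1.0) - boys_reference(0, 1.0)) < 1e-8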
def c3(psi):
    r"""Third Stumpff function.

    For positive arguments:

    .. math::
        c_3(\psi) = \frac{\sqrt{\psi} - \sin{\sqrt{\psi}}}{\sqrt{\psi^3}}

    """
    eps = 1.0
    if psi > eps:
        res = (np.sqrt(psi) - np.sin(np.sqrt(psi))) / (psi * np.sqrt(psi))
    elif psi < -eps:
        res = (np.sinh(np.sqrt(-psi)) - np.sqrt(-psi)) / (-psi * np.sqrt(-psi))
    else:
        res = 1.0 / 6.0
        delta = (-psi) / gamma(2 + 3 + 1)
        k = 1
        while res + delta != res:
            res = res + delta
            k += 1
            delta = (-psi) ** k / gamma(2 * k + 3 + 1)
    return res
def incomplete_gamma2(dA, dX):
    if (dA < 0) or (dX < 0):
        return None
    if not dX:
        return 0
    xam = -dX + dA * math.log(dX)
    if (xam > 700) or (dA > 170):
        return 1
    if dX <= (dA + 1):
        r = s = 1.0 / dA
        for k in range(1, 61):
            r *= float(dX) / (dA + k)
            s += r
            if abs(r / s) < 1e-15:
                break
        ga = math.gamma(dA)
        gin = math.exp(xam) * s
        return (gin / ga)
    t0 = 0
    for k in range(60, 0, -1):
        t0 = float(k - dA) / (1 + (float(k) / (dX + t0)))
    gim = math.exp(xam) / (dX + t0)
    ga = math.gamma(dA)
    return (1 - (gim / ga))
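# Hedged cross-check (assumes scipy): incomplete_gamma2 computes the
# regularized lower incomplete gamma function P(a, x), so it should agree with
# scipy.special.gammainc on both the series and continued-fraction branches.
from scipy.special import gammainc

for a_, x_ in [(0.5, 0.3), (2.0, 1.5), (5.0, 10.0)]:
    assert abs(incomplete_gamma2(a_, x_) - gammainc(a_, x_)) < 1e-8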
def uniform_normalstd_multiple_conds_with_shared_sigma(self, sigma_0, mu_0, n_subjs, seed, use_metropolis):
    """test estimation of Normal distribution std with uniform prior

    sigma_0 - the value of the std node
    mu_0 - the value of the mu node
    use_metropolis - should it use metropolis to evaluate the sampled mean
        instead of the UniformPriorNormalstd
    """
    np.random.seed(seed)
    n_conds = len(mu_0)
    nodes, x_values = self.create_nodes_for_PriorNormalstd(n_subjs, sigma_0, mu_0, prior=pm.Uniform)
    sigma = nodes['sigma']
    mm = pm.MCMC(nodes)
    if use_metropolis:
        mm.sample(20000, 5000)
    else:
        mm.use_step_method(kabuki.steps.UniformPriorNormalstd, sigma)
        mm.sample(10000)

    # calc the new distribution
    alpha = (n_subjs*n_conds - 1) / 2.
    beta = 0
    for i_cond in range(n_conds):
        cur_x_values = x_values[i_cond*n_subjs:(i_cond+1)*n_subjs]
        beta += sum([(x - mu_0[i_cond])**2 for x in cur_x_values]) / 2.

    true_mean = math.gamma(alpha-0.5)/math.gamma(alpha)*np.sqrt(beta)
    anal_var = beta / (alpha - 1) - true_mean**2
    true_std = np.sqrt(anal_var)

    self.assert_results(sigma, sigma_0, true_mean, true_std)
    return mm
def test_gamma(self):
    import math
    assert raises(ValueError, math.gamma, 0.0)
    assert math.gamma(5.0) == 24.0
    assert math.gamma(6.0) == 120.0
    assert raises(ValueError, math.gamma, -1)
    assert math.gamma(0.5) == math.pi ** 0.5
def __init__(self, model_name, n, rho0, Re, M2L, I0, MBH_Msun, r0, generate):
    # model name
    self.name = model_name
    # Sersic index
    self.n = n
    # Effective radius (half-light radius)
    self.Re = Re
    # Scale radius
    self.r0 = r0
    # Mass-to-light ratio
    self.M2L = M2L
    # Intensity at r0
    self.I0 = I0
    # Mass of the black hole in units of Msun
    self.MBH = MBH_Msun
    # determine other parameters based on n
    if self.n < 10. and self.n > 0.6:
        self.b = 2*self.n - (1./3.) + 0.009876/self.n
        self.p = 1. - 0.6097/self.n + 0.05563/self.n**2
        # density at r0
        self.rho0 = M2L*I0*(self.b**(self.n*(1-self.p)))*(math.gamma(2*self.n)/(2*Re*math.gamma(self.n*(3-self.p))))
        # Coulomb logarithm
        self.Lam = self.MBH*0.4
        # black hole mass normalized to galaxy density and radius
        self.Mnorm = self.MBH/(self.rho0*(self.r0)**3)
        # tidal disruption radius
        self.rT = Rsun*(self.MBH)**(1./3)
        # number of tidal radii to span galaxy
        self.r0_rT = (self.r0*pc)/self.rT
        # dynamical timescale
        self.tdyn0 = ((Gconst*self.rho0*realMsun)/pc**3)**(-1./2)
    # start a new directory?
    self.generate = generate
    # directory name
    self.directory = 'SersicRhoGals/{0}_GenRho_n{1}_MBH{2}'.format(self.name, self.n, self.MBH)
def compute_gamma(a=0.5, h=2.0, A=math.sqrt(2), resolution=500, range=[0, 7]):
    """Return plot and mean/st.dev. value of the gamma density."""
    print 'range:', type(range), range
    gah = math.gamma(a + 1./h)
    mean = A*gah/math.gamma(a)
    stdev = A/math.gamma(a)*math.sqrt(
        math.gamma(a + 2./h)*math.gamma(a) - gah**2)
    x = linspace(0, range[1]*stdev, resolution+1)
    y = gamma_density(x, a, h, A)
    plt.figure()  # needed to avoid adding curves in plot
    plt.plot(x, y)
    plt.title('a=%g, h=%g, A=%g' % (a, h, A))

    if not os.path.isdir('static'):
        os.mkdir('static')
    else:
        # Remove old plot files
        for filename in glob.glob(os.path.join('static', '*.png')):
            os.remove(filename)
    # Use time since Jan 1, 1970 in the filename in order to make
    # a unique filename that the browser has not cached
    t = str(time.time())
    plotfile1 = os.path.join('static', 'density_%s.png' % t)
    plotfile2 = os.path.join('static', 'cumulative_%s.png' % t)
    plt.savefig(plotfile1)
    y = gamma_cumulative(x, a, h, A)
    plt.figure()
    plt.plot(x, y)
    plt.grid(True)
    plt.savefig(plotfile2)
    return plotfile1, plotfile2, mean, stdev
def test_triangularelement(self):
    MAXORDER = 7
    roottrans = transform.roottrans('test', (0, 0))
    elem = element.Element(element.SimplexReference(2), roottrans)
    F = lambda a, b: gamma(1+a)*gamma(1+b)/gamma(3+a+b)
    self._test(MAXORDER, elem, F)
def test_tetrahedralelement(self):
    MAXORDER = 8
    roottrans = transform.roottrans('test', (0, 0, 0))
    elem = element.Element(element.SimplexReference(3), roottrans)
    F = lambda a, b, c: gamma(1+a)*gamma(1+b)*gamma(1+c)/gamma(4+a+b+c)
    self._test(MAXORDER, elem, F)
def __init__(self, alpha):
    '''Creates Dirichlet distribution with parameter `alpha`.'''
    from math import gamma
    from operator import mul
    self._alpha = np.array(alpha)
    self._coef = gamma(np.sum(self._alpha)) / \
        reduce(mul, [gamma(a) for a in self._alpha])
def crp_lh(theta, partition):
    n = sum([len(s) for s in partition])
    lh = 0
    lh += safety_log(math.gamma(theta)*theta**len(partition) / math.gamma(theta + n))
    for subset in partition:
        lh += safety_log(math.gamma(len(subset)))
    return lh
def set_constants(self):
    '''
    We assume the user gives us the parameters gamma and kappa
    such that the covariance is

        ( - gamma * Laplacian + kappa^2 )^{-2}.

    Then we modify these parameters such that
    '''
    self.kappa = math.sqrt(self.alpha / self.gamma)
    assert np.isreal(self.kappa)

    # We factor out gamma, so we scale kappa
    # accordingly. Later we compensate.
    # Here we compensate - we now have covariance
    # [ gamma * (-Delta + kappa^2 / gamma ) ]^2
    self.sig2 = (
        math.gamma(self.nu)
        / math.gamma(2)
        / (4*math.pi)**(self.dim/2.)
        / self.alpha**(self.nu)
        / self.gamma**(self.dim/2.)
    )
    self.sig = math.sqrt(self.sig2)
    self.factor = self.sig2 * 2**(1-self.nu) / math.gamma(self.nu)
    self.ran = (0.0, 1.3 * self.sig2)
def __init__(self, alpha):
    from math import gamma
    from operator import mul
    self._alpha = np.array(alpha)
    self._coef = gamma(np.sum(self._alpha)) / \
        reduce(mul, [gamma(a) for a in self._alpha])
def dirichlet_pdf(alpha):
    k = len(alpha)
    gamma_sum = gamma(sum(alpha))
    product_gamma = reduce(multiply, [gamma(a) for a in alpha])
    beta = product_gamma / gamma_sum
    # divide by the multivariate beta function so the density is normalized
    return lambda x: reduce(multiply,
                            [x[i] ** (alpha[i] - 1) for i in xrange(k)]) / beta
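# Hedged usage sketch for dirichlet_pdf above (assumes scipy and a Python 2
# style environment where reduce/xrange are builtins, as in the snippet):
# the normalized density should agree with scipy.stats.dirichlet.
from operator import mul as multiply
from math import gamma
from scipy.stats import dirichlet

pdf = dirichlet_pdf([2.0, 3.0, 4.0])
point = [0.2, 0.3, 0.5]
assert abs(pdf(point) - dirichlet.pdf(point, [2.0, 3.0, 4.0])) < 1e-9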
def c2(psi):
    r"""Second Stumpff function.

    For positive arguments:

    .. math::
        c_2(\psi) = \frac{1 - \cos{\sqrt{\psi}}}{\psi}

    """
    eps = 1.0
    if psi > eps:
        res = (1 - np.cos(np.sqrt(psi))) / psi
    elif psi < -eps:
        res = (np.cosh(np.sqrt(-psi)) - 1) / (-psi)
    else:
        res = 1.0 / 2.0
        delta = (-psi) / gamma(2 + 2 + 1)
        k = 1
        while res + delta != res:
            res = res + delta
            k += 1
            delta = (-psi) ** k / gamma(2 * k + 2 + 1)
    return res
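# Hedged sanity check for the Stumpff functions c2 and c3 defined above: at
# psi = 0 the series branches give 1/2 and 1/6 exactly, and across the
# series/closed-form switch at |psi| = 1 the two branches should agree to
# well below the tolerance used here.
assert c2(0.0) == 0.5 and c3(0.0) == 1.0 / 6.0
assert abs(c2(0.999999) - c2(1.000001)) < 1e-6
assert abs(c3(0.999999) - c3(1.000001)) < 1e-6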
def kn(self, n):
    """ return Kn, where UMVU estimator of std is Kn*S """
    try:
        return math.sqrt(2./(n-1)) * (math.gamma(n/2.) / (math.gamma((n-1.)/2.)))
    except:
        return 1.
def c(q):
    """normalization factor"""
    if -float('inf') < q < 1:
        return (2 * sqrt(pi) * gamma(1/(1-q))) / \
            ((3 - q) * sqrt(1 - q) * gamma((3 - q) / (2 * (1 - q))))
    if q == 1:
        return sqrt(pi)
    if 1 < q < 3:
        return (sqrt(pi) * gamma((3 - q) / (2 * (q - 1)))) / \
            (sqrt(q - 1) * gamma(1/(q - 1)))
def yangdeb_flight(m=1, n=1):
    beta = 3/2
    sigma = ((math.gamma(1 + beta) * np.sin(np.pi * beta / 2)) /
             (math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    u = np.random.normal(size=[m, n]) * sigma
    v = np.random.normal(size=[m, n])
    return u / abs(v) ** (1 / beta)
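# Hedged usage note: the function above implements Mantegna's algorithm for
# Levy-stable step lengths with index beta = 1.5, as used in Yang & Deb's
# cuckoo search. A quick draw (shapes are illustrative):
import numpy as np

steps = yangdeb_flight(m=5, n=3)  # 5x3 array of signed, heavy-tailed steps
print(steps.shape)                # (5, 3)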
def sampleTimeGap(degree, lifetime):
    timeGap = int(round((math.gamma(2 - alpha) * pow((beta * degree), -1))/math.gamma(1 - alpha)))
    while (timeGap == 0):
        timeGap = int(round((math.gamma(2 - alpha) * pow((beta * degree), -1))/math.gamma(1 - alpha)))
    if timeGap > lifetime:
        return lifetime
    return timeGap
def constant_potential_twosphere_identical(phi01, phi02, r1, r2, R, kappa, epsilon):
    # From Carnie+Chan 1993
    N = 20  # Number of terms in expansion
    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index = arange(N, dtype=float) + 0.5

    k1 = special.kv(index, kappa*r1)*sqrt(pi/(2*kappa*r1))
    k2 = special.kv(index, kappa*r2)*sqrt(pi/(2*kappa*r2))
    i1 = special.iv(index, kappa*r1)*sqrt(pi/(2*kappa*r1))
    i2 = special.iv(index, kappa*r2)*sqrt(pi/(2*kappa*r2))

    B = zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(N):
                if n >= nu and m >= nu:
                    g1 = gamma(n-nu+0.5)
                    g2 = gamma(m-nu+0.5)
                    g3 = gamma(nu+0.5)
                    g4 = gamma(m+n-nu+1.5)
                    f1 = factorial(n+m-nu)
                    f2 = factorial(n-nu)
                    f3 = factorial(m-nu)
                    f4 = factorial(nu)
                    Anm = g1*g2*g3*f1*(n+m-2*nu+0.5)/(pi*g4*f2*f3*f4)
                    kB = special.kv(n+m-2*nu+0.5, kappa*R)*sqrt(pi/(2*kappa*R))
                    B[n, m] += Anm*kB

    M = zeros((N, N), float)
    for i in range(N):
        for j in range(N):
            M[i, j] = (2*i+1)*B[i, j]*i1[i]
            if i == j:
                M[i, j] += k1[i]

    RHS = zeros(N)
    RHS[0] = phi01

    a = solve(M, RHS)
    a0 = a[0]

    U = 4*pi * (-pi/2 * a0/phi01 * 1/sinh(kappa*r1) + kappa*r1 + kappa*r1/tanh(kappa*r1))

    # print 'E: %f'%U
    C0 = qe**2*Na*1e-3*1e10/(cal2J*E_0)
    C1 = r1*epsilon*phi01*phi01
    E_inter = U*C1*C0
    return E_inter
def test_gamma():
    AlmostEqual(math.gamma(0.5), math.sqrt(math.pi), 15)
    for i in xrange(1, 20):
        AreEqual(math.factorial(i-1), math.gamma(i))
    AreEqual(math.gamma(float('inf')), float('inf'))
    AssertError(ValueError, math.gamma, float('-inf'))
    Assert(math.isnan(math.gamma(float('nan'))))
    for i in xrange(0, -1001, -1):
        AssertError(ValueError, math.gamma, i)
def test_elbo():
    I, J, K = 5, 3, 2
    R = numpy.ones((I, J))
    M = numpy.ones((I, J))
    M[0, 0], M[2, 2], M[3, 1] = 0, 0, 0  # size Omega = 12

    lambdaU = 2*numpy.ones((I, K))
    lambdaV = 3*numpy.ones((J, K))
    alpha, beta = 3, 1
    priors = {'alpha': alpha, 'beta': beta, 'lambdaU': lambdaU, 'lambdaV': lambdaV}

    expU = 5*numpy.ones((I, K))
    expV = 6*numpy.ones((J, K))
    varU = 11*numpy.ones((I, K))
    varV = 12*numpy.ones((J, K))
    exptau = 8.
    explogtau = 9.

    muU = 14*numpy.ones((I, K))
    muV = 15*numpy.ones((J, K))
    tauU = numpy.ones((I, K))/100.
    tauV = numpy.ones((J, K))/101.
    alpha_s = 20.
    beta_s = 21.

    # expU * expV = [[60]]
    # (R - expU*expV)^2 = 12*59^2 = 41772
    # Var[U*V] = 12*K*((11+5^2)*(12+6^2)-5^2*6^2) = 12*2*828 = 19872
    # -muU*sqrt(tauU) = -14*math.sqrt(1./100.) = -1.4
    # -muV*sqrt(tauV) = -15*math.sqrt(1./101.) = -1.4925557853149838
    # cdf(-1.4) = 0.080756659233771066
    # cdf(-1.4925557853149838) = 0.067776752211548219

    ELBO = 12./2.*(explogtau - math.log(2*math.pi)) - 8./2.*(41772+19872) \
        + 5*2*(math.log(2.) - 2.*5.) + 3*2*(math.log(3.) - 3.*6.) \
        + 3.*numpy.log(1.) - numpy.log(math.gamma(3.)) + 2.*9. - 1.*8. \
        - 20.*numpy.log(21.) + numpy.log(math.gamma(20.)) - 19.*9. + 21.*8. \
        - 0.5*5*2*math.log(1./100.) + 0.5*5*2*math.log(2*math.pi) + 5*2*math.log(1.-0.080756659233771066) \
        + 0.5*5*2*1./100.*(11.+81.) \
        - 0.5*3*2*math.log(1./101.) + 0.5*3*2*math.log(2*math.pi) + 3*2*math.log(1.-0.067776752211548219) \
        + 0.5*3*2*1./101.*(12.+81.)

    BNMF = bnmf_vb_optimised(R, M, K, priors)
    BNMF.expU = expU
    BNMF.expV = expV
    BNMF.varU = varU
    BNMF.varV = varV
    BNMF.exptau = exptau
    BNMF.explogtau = explogtau
    BNMF.muU = muU
    BNMF.muV = muV
    BNMF.tauU = tauU
    BNMF.tauV = tauV
    BNMF.alpha_s = alpha_s
    BNMF.beta_s = beta_s
    assert BNMF.elbo() == ELBO
def mmd_variance(D, k):
    """Calculate the variance of the MMD estimate.

    Currently only known for k=2, but that's what we most commonly use."""
    assert k == 2, 'Only known for k=2'
    gamma_arg = 1 + ((math.gamma(3.*D/2.) * 4)/(math.gamma(D/2.) * math.gamma(D+1)))
    var_c2 = 2. * mmd_formula(D, 3) * gamma_arg
    var_c2 += mmd_formula(D, 2)
    return var_c2
def expected(self):
    return self.alpha*gamma(1. + 1./self.beta)
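# Hedged aside: the expression above matches the Weibull mean,
# E[X] = alpha * Gamma(1 + 1/beta) with alpha the scale and beta the shape
# (an assumption about the surrounding class). Standalone cross-check against
# scipy, if available:
from math import gamma
from scipy.stats import weibull_min

alpha, beta = 2.0, 1.5
assert abs(alpha*gamma(1. + 1./beta) - weibull_min.mean(beta, scale=alpha)) < 1e-9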
                kilogramToPound(float(vlera), s, c)
            else:
                # "Answer: this request cannot be processed. Ask for something else."
                s.sendto("Pergjigja:Kjo kerkese nuk mund te shqyrohet.Kerko dicka tjeter.".encode("UTF-8"), c)
        elif metoda[0].decode("UTF-8") == "PAGA" and len(metoda) == 2 and metoda[1].replace('.', '', 1).isdigit():
            bruto = float(data[4:])
            if bruto < 0:
                # "Invalid gross salary value."
                s.sendto("Vlere invalide e pages bruto.".encode("utf-8"), c)
            else:
                paga(bruto, s, c)
        elif metoda[0].decode("utf-8") == "FAKTORIEL" and len(metoda) == 2 and metoda[1].replace('.', '', 1).isdigit():
            numri = metoda[1]
            if "." not in numri:
                faktorieli(int(numri), s, c)
            elif "." in numri:
                # factorial of a non-integer via the gamma function: n! = gamma(n + 1)
                numri = float(metoda[1])
                s.sendto(str(round(math.gamma(numri + 1), 5)).encode("utf-8"), c)
        else:
            s.sendto("Pergjigja:Kjo kerkese nuk mund te shqyrohet.Kerko dicka tjeter.".encode("UTF-8"), c)
    except:
        pass


def main():
    while True:
        request, adresaDerguesit = serverSocket.recvfrom(2048)
        # "New request from: "
        print("Kerkese e re nga: " + str(adresaDerguesit[0]))
        threading._start_new_thread(handleClient, (serverSocket, adresaDerguesit, request))


main()
serverSocket.close()
def B(alpha, beta):
    """a normalizing constant so that the total probability is 1"""
    return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)
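# Hedged usage sketch: B(alpha, beta) above is the Beta-function normalizer,
# so the Beta pdf can be written directly (beta_pdf is an illustrative helper,
# not from the original module):
import math

def beta_pdf(x, alpha, beta):
    if x <= 0 or x >= 1:
        return 0.0
    return x ** (alpha - 1) * (1 - x) ** (beta - 1) / B(alpha, beta)

assert abs(beta_pdf(0.5, 2, 2) - 1.5) < 1e-12  # Beta(2,2) density at 0.5 is 1.5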
def lower_incomplete_gamma2(a, x):
    return gamma(a) - upper_incomplete_gamma2(a, x)
def genEVmpoTaylor(n, a, ng=40):
    # Quadrature
    sfac = 1.0 / (1.0 + 0.5 * kappa0**2 * a**2)
    # OBC
    tij = numpy.diag([1.0] * n)
    for i in range(n - 1):
        tij[i, i + 1] = tij[i + 1, i] = -0.5 * sfac
    cfac = Aref  # *2.0*kappa0
    nfac = numpy.power(cfac*numpy.sqrt(numpy.linalg.det(tij)), 1.0/n) / \
        numpy.sqrt(numpy.pi)

    import math

    def denorm(n1, n2=None):
        if n2 is None:
            return 1.0 / math.sqrt(float(math.factorial(n1)))
        else:
            return 1.0 / math.sqrt(
                float(math.factorial(n1)) * float(math.factorial(n2)))

    # A,B,D
    A0 = nfac*numpy.array([0.5*(1+(-1)**n1)*sfac**(n1/2.0)*math.gamma((n1+1.0)/2.0)
                           * denorm(n1) for n1 in range(ng)])
    B0 = nfac*numpy.array([0.5*(1-(-1)**n1)*sfac**(n1/2.0)*math.gamma((n1+2.0)/2.0)
                           * denorm(n1) for n1 in range(ng)])*numpy.sqrt(sfac*a)
    D0 = nfac*numpy.array([0.5*(1+(-1)**n1)*sfac**(n1/2.0)*math.gamma((n1+3.0)/2.0)
                           * denorm(n1) for n1 in range(ng)])*(sfac*a)
    A1 = nfac*numpy.array([[0.5*(1+(-1)**(n1+n2))*sfac**((n1+n2)/2.0)*math.gamma((n1+n2+1.0)/2.0)
                            * denorm(n1, n2) for n1 in range(ng)] for n2 in range(ng)])
    B1 = nfac*numpy.array([[0.5*(1-(-1)**(n1+n2))*sfac**((n1+n2)/2.0)*math.gamma((n1+n2+2.0)/2.0)
                            * denorm(n1, n2) for n1 in range(ng)] for n2 in range(ng)])*numpy.sqrt(sfac*a)
    D1 = nfac*numpy.array([[0.5*(1+(-1)**(n1+n2))*sfac**((n1+n2)/2.0)*math.gamma((n1+n2+3.0)/2.0)
                            * denorm(n1, n2) for n1 in range(ng)] for n2 in range(ng)])*(sfac*a)

    # Construction of MPOs
    idn = numpy.identity(4)
    nii = numpy.zeros((4, 4))
    nii[1, 1] = 1.0
    nii[2, 2] = 1.0
    nud = numpy.zeros((4, 4))
    nud[3, 3] = 1.0
    # first [A0,B0,D0]
    site0 = numpy.zeros((1, 3 * ng, 4, 4))
    site0[0, :ng] = numpy.einsum('a,mn->amn', A0, idn)
    site0[0, ng:2 * ng] = numpy.einsum('a,mn->amn', B0, nii)
    site0[0, 2 * ng:] = numpy.einsum('a,mn->amn', D0, nud)
    # last [D0,B0,A0]
    site1 = numpy.zeros((3 * ng, 1, 4, 4))
    site1[:ng, 0] = numpy.einsum('a,mn->amn', D0, nud)
    site1[ng:2 * ng, 0] = numpy.einsum('a,mn->amn', B0, nii)
    site1[2 * ng:, 0] = numpy.einsum('a,mn->amn', A0, idn)
    # central
    # [A1,B1,D1]
    # [ 0,A1,B1]
    # [ 0, 0,A1]
    site2 = numpy.zeros((3 * ng, 3 * ng, 4, 4))
    site2[:ng, :ng] = numpy.einsum('ab,mn->abmn', A1, idn)
    site2[ng:2 * ng, ng:2 * ng] = numpy.einsum('ab,mn->abmn', A1, idn)
    site2[2 * ng:, 2 * ng:] = numpy.einsum('ab,mn->abmn', A1, idn)
    site2[:ng, ng:2 * ng] = numpy.einsum('ab,mn->abmn', B1, nii)
    site2[ng:2 * ng, 2 * ng:] = numpy.einsum('ab,mn->abmn', B1, nii)
    site2[:ng, 2 * ng:] = numpy.einsum('ab,mn->abmn', D1, nud)
    sites = [site0] + [site2] * (n - 2) + [site1]
    vmpo = class_mpo(n, sites)
    return vmpo
def subbtion_function(beta, sigma, mu, x):
    left = beta / (2 * sigma * math.gamma(1 / beta))
    sub = abs(x - mu) / sigma
    right = math.exp(-math.pow(sub, beta))
    return left * right
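# Hedged cross-check: the function above is the Subbotin / generalized-normal
# density, which scipy exposes as gennorm (assumes scipy; beta = shape,
# mu = loc, sigma = scale).
import math
from scipy.stats import gennorm

b_, s_, m_, x_ = 1.5, 2.0, 0.5, 1.2
assert abs(subbtion_function(b_, s_, m_, x_)
           - gennorm.pdf(x_, b_, loc=m_, scale=s_)) < 1e-9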
import math

n = float(input('Enter a real number: '))
resul = math.gamma(n)
print('The value without the comma {}'.format(resul))
def nball_volume(R, k=3):
    """
    Calculate the volume of an n-sphere.
    This is used for the analytical randoms.
    """
    return (np.pi**(k / 2.0) / gamma(k / 2.0 + 1.0)) * R**k
results_robin = np.zeros((number_of_tests, np.size(rhos)))

#### WARNING! SciPy routine jn_zeros() hangs without any error message when
#### trying to compute zeros for these orders:
# more on this: https://github.com/scipy/scipy/issues/11994
banned_list_for_n = [
    231, 244, 281, 288, 347, 348, 357, 405, 406, 419, 437, 505, 506, 507,
    570, 582, 591, 643, 644, 655, 658, 679, 706, 713, 722, 752, 756, 757,
    764, 775, 793, 796, 811, 820, 840, 855, 875, 886, 916, 942, 948, 966
]  # These n are saved in an extra list and the loop just skips them

for m in range(0, np.size(rhos)):
    rho = rhos[m]
    print('rho = %.2f' % rho)
    alphalapl = nu + 1.
    kappa = np.sqrt(2 * nu) / rho
    eta = np.sqrt(sigma**2 * 4 * math.pi * math.gamma(nu + 1) /
                  ((kappa**2) * math.gamma(nu)))
    true = not_nodal.true_cov_mat(x, y, kappa, nu)  # GET THE TRUE MATRIX
    for i in range(0, number_of_tests):  # ENTER MAIN LOOP OVER WINDOW SIZES
        beta = betas_over_k[i] * kappa
        print('beta = %.2f' % beta)
        expression_robin = np.zeros((vert, vert))
        for n in range(0, param_n):
            if (n in banned_list_for_n):
                continue  # skip the step if n is in the banned list
            print(n, i)
            a = ss.jnp_zeros(
def a(l, p, d):
    ret_value = math.gamma(1.0 / p)**(2 * d - l)
    ret_value *= math.gamma(2.0 / p)**(l - d)
    ret_value /= math.gamma(float(l) / p + 1.0)
    ret_value *= (-1)**(l - d) * binom_coeff(d, l - d) * (2.0 / p)**d
    return ret_value
def integrate_monomial_over_enr2(k):
    if numpy.any(k % 2 == 1):
        return 0
    return numpy.prod([math.gamma((kk + 1) / 2.0) for kk in k])
def test_eval_gradient(self):
    top = set_as_top(Assembly())
    top.add('comp1', Simple())
    top.run()

    exp = ExprEvaluator('3.0*comp1.c', top.driver)
    grad = exp.evaluate_gradient(scope=top)
    self.assertEqual(top.comp1.c, 7.0)
    assert_rel_error(self, grad['comp1.c'], 3.0, 0.00001)

    # Commented out this test, until we find a case that can't be
    # handled analytically
    # interface test: step size
    # (for linear slope, larger stepsize more accurate because of
    # python's rounding)
    #grad2 = exp.evaluate_gradient(scope=top, stepsize=0.1)
    #assert( abs(grad['comp1.c'] - 3.0) > abs(grad2['comp1.c'] - 3.0) )

    # More complicated, multiple comps
    top.add('comp2', Simple())

    exp = ExprEvaluator('comp2.b*comp1.c**2', top.driver)
    grad = exp.evaluate_gradient(scope=top)
    self.assertEqual(len(grad), 2)
    assert_rel_error(self, grad['comp1.c'], 70.0, 0.00001)
    assert_rel_error(self, grad['comp2.b'], 49.0, 0.00001)

    # test limited varset
    grad = exp.evaluate_gradient(scope=top, wrt=['comp2.b'])
    self.assertEqual(len(grad), 1)

    exp = ExprEvaluator('pow(comp2.b,2)', top.driver)
    grad = exp.evaluate_gradient(scope=top)
    assert_rel_error(self, grad['comp2.b'], 10.0, 0.00001)

    exp = ExprEvaluator('pow(comp2.b,3)', top.driver)
    grad = exp.evaluate_gradient(scope=top)
    assert_rel_error(self, grad['comp2.b'], 75.0, 0.00001)

    exp = ExprEvaluator('log(comp2.a)', top.driver)
    grad = exp.evaluate_gradient(scope=top)
    assert_rel_error(self, grad['comp2.a'], 1. / top.comp2.a, 0.00001)

    exp = ExprEvaluator('sin(cos(comp2.b))+sqrt(comp2.a)/comp1.c', top.driver)
    grad = exp.evaluate_gradient(scope=top)

    g1 = -sin(top.comp2.b) * cos(cos(top.comp2.b))  # true gradient components
    g2 = (2 * sqrt(top.comp2.a) * top.comp1.c)**-1
    g3 = -sqrt(top.comp2.a) / top.comp1.c**2

    assert_rel_error(self, grad['comp2.b'], g1, 0.00001)
    assert_rel_error(self, grad['comp2.a'], g2, 0.00001)
    assert_rel_error(self, grad['comp1.c'], g3, 0.00001)

    exp = ExprEvaluator('gamma(comp2.a)', top.driver)
    grad = exp.evaluate_gradient(scope=top)

    from scipy.special import polygamma
    g1 = gamma(top.comp2.a) * polygamma(0, top.comp2.a)  # true partial derivative
    assert_rel_error(self, grad['comp2.a'], g1, 0.001)

    exp = ExprEvaluator('abs(comp2.a)', top.driver)
    grad = exp.evaluate_gradient(scope=top)
    assert_rel_error(self, grad['comp2.a'], 1.0, 0.0001)
def generalizedLikelihoodFunction(data, comparedata, measerror=None, params=None):
    """
    Under the assumption of having correlated, heteroscedastic, and
    non-Gaussian errors and assuming that the data are coming from a time
    series modeled as

    .. math::

            \\Phi_p(B)e_t = \\sigma_t a_t

    with `a_t` an i.i.d. random error with zero mean and unit standard
    deviation, described by a skew exponential power (SEP) density, the
    likelihood `p` can be calculated as follows:

    .. math::

            p = \\frac{2\\sigma_i}{\\xi+\\xi^{-1}}\\omega_\\beta exp(-c_\\beta |a_{\\xi,t}|^{2/(1+\\beta)})

    where

    .. math::

            a_{\\xi,t} = \\xi^{-sign(\\mu_\\xi+\\sigma_\\xi a_t )}(\\mu_\\xi+\\sigma_\\xi a_t)

    For more details see: http://onlinelibrary.wiley.com/doi/10.1029/2009WR008933/epdf,
    page 3, formula (6), and page 15, Appendix A.

    `Usage:` Maximizing the likelihood value guides to the best model.

    :param data: observed measurements as a numerical list
    :type data: list
    :param comparedata: simulated data from a model which should fit the original data somehow
    :type comparedata: list
    :param measerror: measurement errors of every data input, if nothing is given a standard calculation is done to simulate measurement errors
    :type measerror: list
    :param params: Contains a tuple of model parameters which are needed for calculating the likelihood. The first component contains the values and the second the names of the values.
        The following parameters are needed in this function:
        -1 < `likelihood_beta` < 1,
        0 < `likelihood_xi` <= 10,
        0 <= `likelihood_sigma0` <= 1,
        0 <= `likelihood_sigma1` <= 1,
        0 <= `likelihood_phi1` < 1,
        0 <= `likelihood_muh` <= 100
    :type params: tuple
    :return: the p value as a likelihood
    :rtype: float
    """
    __standartChecksBeforeStart(data, comparedata)
    errorArr = __calcSimpleDeviation(data, comparedata)
    if measerror is None:
        measerror = __generateMeaserror(data)
    measerror = np.array(measerror)
    comparedata = np.array(comparedata)
    measerror = __jitter_measerror_if_needed("generalizedLikelihoodFunction", measerror)

    paramDependencies = ["likelihood_beta", "likelihood_xi", "likelihood_sigma0",
                         "likelihood_sigma1", "likelihood_phi1", "likelihood_muh"]

    if params is None:
        # for these params look into http://onlinelibrary.wiley.com/doi/10.1029/2009WR008933/epdf, page 5
        beta = np.random.uniform(-0.99, 1, 1)
        xi = np.random.uniform(0.01, 10, 1)
        sigma0 = np.random.uniform(0, 1, 1)
        sigma1 = np.random.uniform(0, 1, 1)
        phi1 = np.random.uniform(0, .99, 1)
        muh = np.random.uniform(0, 100, 1)
    else:
        missingparams = []
        randomparset, parameternames = params
        parameternames = np.array(parameternames)
        randomparset = np.array(randomparset)

        for nm in paramDependencies:
            if nm not in parameternames:
                missingparams.append(nm)

        if missingparams.__len__() > 0:
            raise LikelihoodError(
                "Unfortunately your param list does not contain all parameters which are needed for this class. "
                "The following parameters are needed, too: " + str(missingparams))

        beta = float(randomparset[np.where(parameternames == 'likelihood_beta')])
        xi = float(randomparset[np.where(parameternames == 'likelihood_xi')])
        sigma0 = float(randomparset[np.where(parameternames == 'likelihood_sigma0')])
        sigma1 = float(randomparset[np.where(parameternames == 'likelihood_sigma1')])
        phi1 = float(randomparset[np.where(parameternames == 'likelihood_phi1')])
        muh = float(randomparset[np.where(parameternames == 'likelihood_muh')])

    # Break the calculation if the given parameters are not valid
    if beta <= -1 or beta > 1:
        warnings.warn("The parameter 'beta' should be greater than -1 and less than or equal to 1 and is: " + str(beta))
        return np.NAN
    if xi < 0.1 or xi > 10:
        warnings.warn("The parameter 'xi' should be between 0.1 and 10 and is: " + str(xi))
        return np.NAN
    if sigma0 < 0 or sigma0 > 1:
        warnings.warn("The parameter 'sigma0' should be between 0 and 1 and is: " + str(sigma0))
        return np.NAN
    if sigma1 < 0 or sigma1 > 1:
        warnings.warn("The parameter 'sigma1' should be between 0 and 1 and is: " + str(sigma1))
        return np.NAN
    if phi1 < 0 or phi1 > 1:
        warnings.warn("The parameter 'phi1' should be between 0 and 1 and is: " + str(phi1))
        return np.NAN
    if muh < 0 or muh > 100:
        warnings.warn("The parameter 'muh' should be between 0 and 100 and is: " + str(muh))
        return np.NAN

    try:
        omegaBeta = np.sqrt(math.gamma(3 * (1 + beta) / 2)) / \
            ((1 + beta) * np.sqrt(math.gamma((1 + beta) / 2)**3))
        M_1 = math.gamma(1 + beta) / (np.sqrt(math.gamma(3 * (1 + beta) / 2))
                                      * np.sqrt(math.gamma((1 + beta) / 2)))
        M_2 = 1
        sigma_xi = np.sqrt(np.abs(float((M_2 - M_1**2) * (xi**2 + xi**(-2)) + 2 * M_1**2 - M_2)))
        cBeta = (math.gamma(3 * (1 + beta) / 2) / math.gamma((1 + beta) / 2))**(1 / (1 + beta))
    except ValueError:
        raise LikelihoodError("Please check your parameter input, there is something wrong with the parameters")

    if xi != 0.0:
        mu_xi = M_1 * (xi - (xi**(-1)))
    else:
        mu_xi = 0.0

    n = data.__len__()
    sum_at = 0
    # formula for a_t is from page 3, (6)
    for j in range(n - 1):
        t = j + 1
        if t > 0 and t < n and type(t) == type(1):
            a_t = (errorArr[t] - phi1 * errorArr[t - 1]) / (measerror[t])
        else:
            warnings.warn("Your parameter 't' does not suit the given data list")
            return None

        a_xi_t = xi**(-1 * np.sign(mu_xi + sigma_xi * a_t)) * (mu_xi + sigma_xi * a_t)
        sum_at += np.abs(a_xi_t)**(2 / (1 + beta))

    # Page 3, formula (5) of this paper explains that sigma[t] = sigma0 + sigma1*E[t],
    # where E[t] is called y(x) in the main paper (discrepancy) and sigma0 and
    # sigma1 are input parameters which can also be generated by the function
    # itself. Then
    #     E[t] = Y_{ht}*mu[t]
    # where Y_{ht} is the simulated model data and mu_t = exp(mu_h * Y_{ht}).
    # So, mu_h is "a bias parameter to be inferred from the model." (cite, page 3, formula (3))
    mu_t = np.exp(muh * comparedata)
    E = comparedata * mu_t
    sigmas = sigma0 + sigma1 * E
    if sigmas[sigmas <= 0.0].size > 0:
        warnings.warn("Sorry, your comparedata have negative values. Maybe your model has some inaccurate"
                      " assumptions or there is another error."
                      " We cannot calculate this likelihood")
        return np.NAN

    return n * np.log(omegaBeta * (2 * sigma_xi) / np.abs(xi + (1 / xi))) \
        - np.sum(np.log(sigmas)) - cBeta * sum_at
def SkewedStudentLikelihoodHeteroscedastic(data, comparedata, measerror=None, params=None):
    """
    Under the assumption that the data are heteroscedastic, i.e. they have a
    different error for every measurement, and that the residuals are
    non-Gaussian distributed, we perform a likelihood calculation based on
    this formula, having :math:`k` as the skewness parameter from the data
    and where we assume that the kurtosis parameter :math:`\\nu > 2`:

    .. math::

            p = \\prod_{i=1}^n p_i

    where

    .. math::

            \\eta_i = (\\epsilon_i-\\epsilon_{i-1}\\phi)\\sqrt{1-\\phi^2}

    and

    .. math::

            p_i = \\frac{2c_2\\Gamma(\\frac{\\nu+1}{2})\\sqrt{\\frac{\\nu}{\\nu-2}}}{\\Gamma(\\frac{\\nu}{2})\\sqrt{\\pi \\nu}\\sqrt{1-\\phi^2}\\sigma_i} \\times (1+\\frac{1}{\\nu-2}(\\frac{c_1+c_2\\eta_i}{k^{sign(c_1+c_2\\eta_i)}})^2)^{-\\frac{\\nu+1}{2}}

    and

    .. math::

            c_1 = \\frac{(k^2-\\frac{1}{k^2})2\\Gamma(\\frac{\\nu+1}{2})\\sqrt{\\frac{\\nu}{\\nu-2}}(\\nu-2)}{(k+\\frac{1}{k})\\Gamma(\\frac{\\nu}{2})\\sqrt{\\pi \\nu}(\\nu-1)}

    and

    .. math::

            c_2 = \\sqrt{-c_1^2+\\frac{k^3+\\frac{1}{k^3}}{k+\\frac{1}{k}}}

    For detailed mathematical questions take a look into hessd-12-2155-2015.pdf
    (https://www.hydrol-earth-syst-sci-discuss.net/12/2155/2015/hessd-12-2155-2015.pdf)
    pages 2165-2169, formula (15).

    `Usage:` Maximizing the likelihood value guides to the best model. Be aware
    that only a right model assumption leads to a result which makes sense.

    :param data: observed measurements as a numerical list
    :type data: list
    :param comparedata: simulated data from a model which should fit the original data somehow
    :type comparedata: list
    :param measerror: measurement errors of every data input, if nothing is given a standard calculation is done to simulate measurement errors
    :type measerror: list
    :param params: Contains a tuple of model parameters which are needed for calculating the likelihood. The first component contains the values and the second the names of the values.
        The following parameters are needed in this function:
        `likelihood_nu` > 2,
        `likelihood_kappa` > 0,
        -1 < `likelihood_phi` < 1
    :type params: tuple
    :return: the p value as a likelihood
    :rtype: float
    """
    __standartChecksBeforeStart(data, comparedata)
    if measerror is None:
        measerror = __generateMeaserror(data)
    measerror = np.array(measerror)
    measerror = __jitter_measerror_if_needed("SkewedStudentLikelihoodHeteroscedastic", measerror)

    diff = np.array(__calcSimpleDeviation(data, comparedata))

    paramDependencies = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"]

    if params is None:
        # based on VRUGTS paper, footnote "YING", page 307
        nu = np.random.uniform(2.001, 100, 1)
        k = np.random.uniform(0.001, 100, 1)
        phi = np.random.uniform(-0.99, 0.99, 1)
    else:
        missingparams = []
        randomparset, parameternames = params
        randomparset = np.array(randomparset)
        parameternames = np.array(parameternames)

        for nm in paramDependencies:
            if nm not in parameternames:
                missingparams.append(nm)

        if missingparams.__len__() > 0:
            raise LikelihoodError(
                "Unfortunately your param list does not contain all parameters which are needed for this class. "
                "The following parameters are needed, too: " + str(missingparams))

        nu = randomparset[parameternames == 'likelihood_nu'][0]
        k = randomparset[parameternames == 'likelihood_kappa'][0]
        phi = randomparset[parameternames == 'likelihood_phi'][0]

    if abs(phi) > 1:
        warnings.warn("[SkewedStudentLikelihoodHeteroscedastic] The parameter 'phi' should be between -1 and 1 and is: " + str(phi))
        return np.NAN
    if nu <= 2:
        warnings.warn("[SkewedStudentLikelihoodHeteroscedastic] The parameter 'nu' should be greater than 2 and is: " + str(nu))
        return np.NAN
    if k <= 0:
        warnings.warn("[SkewedStudentLikelihoodHeteroscedastic] The parameter 'k' should be greater than 0 and is: " + str(k))
        return np.NAN

    # eta_i = (eps_i - phi*eps_{i-1}) * sqrt(1 - phi^2); the parentheses follow
    # the docstring formula, so the whole AR(1) residual is scaled
    eta_all = (diff[1:] - phi * diff[:-1]) * np.sqrt(1 - phi**2)

    c_1 = ((k**2 - 1 / (k**2)) * 2 * math.gamma((nu + 1) / 2) * np.sqrt(nu / (nu - 2)) * (nu - 2)) / (
        (k + (1 / k)) * math.gamma(nu / 2) * np.sqrt(np.pi * nu) * (nu - 1))
    for_c2 = -1 * (c_1)**2 + (k**3 + 1 / k**3) / (k + 1 / k)
    c_2 = np.sqrt(for_c2)

    # TODO Maximizing with negative to zero?
    return np.log(-np.prod(
        (2 * c_2 * math.gamma((nu + 1) / 2) * np.sqrt(nu / (nu - 2))) /
        ((k + 1 / k) * math.gamma(nu / 2) * np.sqrt(np.pi * nu) * np.sqrt(1 - phi**2) * measerror[1:])
        * (1 + (1 / (nu - 2)) * ((c_1 + c_2 * eta_all) / (k ** (np.sign(c_1 + c_2 * eta_all))))**2)**(-(nu + 1) / 2)))
def log_sum_c(xvals, alpha, beta, m):
    log_vals = np.log(m) + (alpha - 1) * np.log(xvals) -\
        (xvals / beta) ** m - alpha * np.log(beta) -\
        np.log(math.gamma(alpha / m))
    return log_vals.sum()
def SkewedStudentLikelihoodHeteroscedasticAdvancedARModel(data, comparedata, measerror=None, params=None):
    """
    This function is based on the previous one, called
    `SkewedStudentLikelihoodHeteroscedastic`. We expand the AR(1) model so
    that the expectation of :math:`\\eta_i` is equal to the expectation of a
    residual :math:`\\epsilon_i`. So we have

    .. math::

            \\eta_i = (\\epsilon_i-\\epsilon_{i-1}\\phi + \\frac{\\phi}{N}\\sum_{j = 1}^{N} \\epsilon_j)\\sqrt{1-\\phi^2}

    For detailed mathematical questions take a look into hessd-12-2155-2015.pdf
    (https://www.hydrol-earth-syst-sci-discuss.net/12/2155/2015/hessd-12-2155-2015.pdf)
    page 2170, formula (20).

    `Usage:` Maximizing the likelihood value guides to the best model. Be aware
    that only a right model assumption leads to a result which makes sense.

    :param data: observed measurements as a numerical list
    :type data: list
    :param comparedata: simulated data from a model which should fit the original data somehow
    :type comparedata: list
    :param measerror: measurement errors of every data input, if nothing is given a standard calculation is done to simulate measurement errors
    :type measerror: list
    :param params: Contains a tuple of model parameters which are needed for calculating the likelihood. The first component contains the values and the second the names of the values.
        The following parameters are needed in this function:
        `likelihood_nu` > 2,
        `likelihood_kappa` > 0,
        -1 < `likelihood_phi` < 1
    :type params: tuple
    :return: the p value as a likelihood
    :rtype: float
    """
    __standartChecksBeforeStart(data, comparedata)
    if measerror is None:
        measerror = __generateMeaserror(data)
    measerror = np.array(measerror)
    measerror = __jitter_measerror_if_needed("SkewedStudentLikelihoodHeteroscedasticAdvancedARModel", measerror)

    res = np.array(__calcSimpleDeviation(data, comparedata))

    paramDependencies = ["likelihood_nu", "likelihood_kappa", "likelihood_phi"]

    if params is None:
        # based on VRUGTS paper, footnote "YING", page 307
        nu = np.random.uniform(2.001, 100, 1)
        k = np.random.uniform(0.001, 100, 1)
        phi = np.random.uniform(-0.99, 0.99, 1)
    else:
        missingparams = []
        randomparset, parameternames = params
        randomparset = np.array(randomparset)
        parameternames = np.array(parameternames)

        for nm in paramDependencies:
            if nm not in parameternames:
                missingparams.append(nm)

        if missingparams.__len__() > 0:
            raise LikelihoodError(
                "Unfortunately your param list does not contain all parameters which are needed for this class. "
                "The following parameters are needed, too: " + str(missingparams))

        nu = randomparset[parameternames == 'likelihood_nu'][0]
        k = randomparset[parameternames == 'likelihood_kappa'][0]
        phi = randomparset[parameternames == 'likelihood_phi'][0]

    if abs(phi) > 1:
        warnings.warn("[SkewedStudentLikelihoodHeteroscedasticAdvancedARModel] The parameter 'phi' should be between -1 and 1 and is: " + str(phi))
        return np.NAN
    if nu <= 2:
        warnings.warn("[SkewedStudentLikelihoodHeteroscedasticAdvancedARModel] The parameter 'nu' should be greater than 2 and is: " + str(nu))
        return np.NAN
    if k <= 0:
        warnings.warn("[SkewedStudentLikelihoodHeteroscedasticAdvancedARModel] The parameter 'k' should be greater than 0 and is: " + str(k))
        return np.NAN

    N = data.__len__()
    eta_all = (res[1:] - phi * res[:-1] + phi / N * np.sum(res)) * np.sqrt(1 - phi**2)

    c_1 = ((k**2 - 1 / (k**2)) * 2 * math.gamma((nu + 1) / 2) * np.sqrt(nu / (nu - 2)) * (nu - 2)) / (
        (k + (1 / k)) * math.gamma(nu / 2) * np.sqrt(np.pi * nu) * (nu - 1))
    for_c2 = -1 * (c_1)**2 + (k**3 + 1 / k**3) / (k + 1 / k)
    c_2 = np.sqrt(for_c2)

    # TODO Maximizing with negative to zero?
    datas = ((2 * c_2 * math.gamma((nu + 1) / 2) * np.sqrt(nu / (nu - 2))) /
             ((k + 1 / k) * math.gamma(nu / 2) * np.sqrt(np.pi * nu) * np.sqrt(1 - phi**2) * measerror[1:])
             * (1 + (1 / (nu - 2)) * ((c_1 + c_2 * eta_all) / (k ** (np.sign(c_1 + c_2 * eta_all))))**2)**(-(nu + 1) / 2))
    return np.log(-np.prod(datas))
mus = ms / mstar
dnms1 = np.exp(-mus) * (phi1s * mus**alpha1 + phi2s * mus**alpha2) / mstar
#
# using Bernardi et al. 2013 double Schechter fit for large M*
#
mstarb = 0.0094e9
phisb = 1.040e-2
alphab = 1.665
betab = 0.255
phisg = 0.675e-2
mstarg = 2.7031e9
gammag = 0.296
gammanorm = math.gamma(alphab / betab)

musb = ms / mstarb
musg = ms / mstarg
dnms2 = (phisb * np.exp(-musb**betab) * musb**(alphab - 1) / mstarb * betab / gammanorm
         + phisg * musg**(gammag - 1) * np.exp(-musg) / mstarg)
#
# multiply by M* to get dn/dlnM and take maximum
# of Baldry et al. and Bernardi et al. stellar mass functions to construct the composite
#
dnms1 = dnms1 * ms
dnms2 = dnms2 * ms
dnms = np.maximum(dnms1, dnms2)
def gamma_fun_pdf(xvals, alpha, beta):
    pdf_vals = (xvals ** (alpha - 1) * e ** (-xvals / beta)) /\
        (beta ** alpha * math.gamma(alpha))
    return pdf_vals
print(math.copysign(-1, -4))
print(math.copysign(-1, 4))
print(math.factorial(3))
print(math.fabs(-4))
print(math.gcd(48, 36))
print(math.exp(1))
print(math.exp(3))
print(math.expm1(1))
print(math.log(2))
print(math.log(2, 10))
print(math.pow(8, 3))

x = 0
print(math.sin(x))
print(math.cos(x))
print(math.tan(x))
print(math.atan(x))
print(math.hypot(2, 5))

y = 1
print(math.degrees(y))
print(math.radians(y))
print(math.cosh(30))
print(math.tanh(60))

z = 2
print(math.erf(z))
print(math.erfc(z))
print(math.gamma(z))
print(math.lgamma(z))
def test_wasp_resources_grid_point(site):
    #  x = np.array([l.split() for l in """0.6010665 -10.02692 32.71442 -6.746912
    # 0.5007213 -4.591617 37.10247 -11.0699
    # 0.3104101 -1.821247 59.18301 -12.56743
    # 0.4674515 16.14293 44.84665 -9.693183
    # 0.8710347 5.291974 26.01634 -6.154611
    # 0.9998786 -2.777032 15.72486 1.029988
    # 0.9079611 -7.882853 16.33216 6.42329
    # 0.759553 -5.082487 17.23354 10.18187
    # 0.7221162 4.606324 17.96417 11.45838
    # 0.8088576 8.196074 16.16308 9.277925
    # 0.8800673 3.932325 14.82337 5.380589
    # 0.8726974 -3.199536 19.99724 -1.433086""".split("\n")], dtype=np.float)
    #  for x_ in x.T:
    #      print(list(x_))
    x = [262978]
    y = [6504814]
    npt.assert_almost_equal(site.elevation(x, y), 227.8, 1)

    # Data from WAsP:
    # - add turbine (262878,6504814,30)
    # - Turbine (right click) - reports - Turbine Site Report full precision
    wasp_A = [2.197305, 1.664085, 1.353185, 2.651781, 5.28438, 5.038289,
              4.174325, 4.604496, 5.043066, 6.108261, 6.082033, 3.659798]
    wasp_k = [1.771484, 2.103516, 2.642578, 2.400391, 2.357422, 2.306641,
              2.232422, 2.357422, 2.400391, 2.177734, 1.845703, 1.513672]
    wasp_f = [5.188083, 2.509297, 2.869334, 4.966141, 13.16969, 9.514355,
              4.80275, 6.038354, 9.828702, 14.44174, 16.60567, 10.0659]
    wasp_spd = [0.6010665, 0.5007213, 0.3104101, 0.4674515, 0.8710347, 0.9998786,
                0.9079611, 0.759553, 0.7221162, 0.8088576, 0.8800673, 0.8726974]
    wasp_trn = [-10.02692, -4.591617, -1.821247, 16.14293, 5.291974, -2.777032,
                -7.882853, -5.082487, 4.606324, 8.196074, 3.932325, -3.199536]
    wasp_inc = [-6.746912, -11.0699, -12.56743, -9.693183, -6.154611, 1.029988,
                6.42329, 10.18187, 11.45838, 9.277925, 5.380589, -1.433086]
    wasp_ti = [32.71442, 37.10247, 59.18301, 44.84665, 26.01634, 15.72486,
               16.33216, 17.23354, 17.96417, 16.16308, 14.82337, 19.99724]
    rho = 1.179558
    wasp_u_mean = [1.955629, 1.473854, 1.202513, 2.350761, 4.683075, 4.463644,
                   3.697135, 4.080554, 4.470596, 5.409509, 5.402648, 3.300305]
    wasp_p_air = [9.615095, 3.434769, 1.556282, 12.45899, 99.90289, 88.03519,
                  51.41135, 66.09097, 85.69466, 164.5592, 193.3779, 56.86945]

    wasp_aep = np.array([3725293.0, 33722.71, 0.3093564, 3577990.0, 302099600.0,
                         188784100.0, 48915640.0, 84636210.0, 189009800.0,
                         549195100.0, 691258600.0, 120013000.0]) / 1000
    wasp_aep_no_density_correction = np.array([3937022.0, 36046.93, 0.33592, 3796496.0,
                                               314595600.0, 196765700.0, 51195440.0,
                                               88451200.0, 197132700.0, 568584400.0,
                                               712938400.0, 124804600.0]) / 1000
    wasp_aep_total = 2.181249024
    wasp_aep_no_density_correction_total = 2.26224
    wt_u = np.array([3.99, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                     16, 17, 18, 19, 20, 21, 22, 23, 24, 25])
    wt_p = np.array([0, 55., 185., 369., 619., 941., 1326., 1741., 2133., 2436.,
                     2617., 2702., 2734., 2744., 2747., 2748., 2748., 2750., 2750.,
                     2750., 2750., 2750., 2750.])
    wt_ct = np.array([0, 0.871, 0.853, 0.841, 0.841, 0.833, 0.797, 0.743, 0.635,
                      0.543, 0.424, 0.324, 0.258, 0.21, 0.175, 0.147, 0.126, 0.109,
                      0.095, 0.083, 0.074, 0.065, 0.059])
    wt = OneTypeWindTurbines.from_tabular(name="NEG-Micon 2750/92 (2750 kW)",
                                          diameter=92, hub_height=70, ws=wt_u,
                                          ct=wt_ct, power=wt_p, power_unit='kw')

    A_lst, k_lst, f_lst, spd_lst, orog_trn_lst, flow_inc_lst, tke_lst = \
        [site.interp_funcs[n]((x, y, 30, range(0, 360, 30)))
         for n in ['A', 'k', 'f', 'spd', 'orog_trn', 'flow_inc', 'tke']]
    f_lst = f_lst * 360 / 12
    pdf_lst = [lambda x, A=A, k=k: k / A * (x / A)**(k - 1) * np.exp(-(x / A)**k) * (x[1] - x[0])
               for A, k in zip(A_lst, k_lst)]
    cdf_lst = [lambda x, A=A, k=k: 1 - np.exp(-(x / A) ** k) for A, k in zip(A_lst, k_lst)]

    dx = .1
    ws = np.arange(dx / 2, 35, dx)

    # compare to wasp data
    npt.assert_array_equal(A_lst, wasp_A)
    npt.assert_array_equal(k_lst, wasp_k)
    npt.assert_array_almost_equal(f_lst, np.array(wasp_f) / 100)
    npt.assert_array_almost_equal(spd_lst, wasp_spd)
    npt.assert_array_almost_equal(orog_trn_lst, wasp_trn)
    npt.assert_array_almost_equal(flow_inc_lst, wasp_inc)
    npt.assert_array_almost_equal(tke_lst, np.array(wasp_ti) / 100)

    # compare pdf, u_mean and aep to wasp
    WD, WS, TI, P = site.local_wind(x, np.array(y) + 1e-6, 30, wd=np.arange(0, 360, 30), ws=ws)
    P = P / f_lst[na, :, na]  # only wind speed probability (not wdir)

    # pdf
    for i in range(12):
        npt.assert_array_almost_equal(np.interp(ws, WS[0, i], np.cumsum(P[0, i])),
                                      np.cumsum(pdf_lst[i](ws)), 1)

    # u_mean
    npt.assert_almost_equal([A * math.gamma(1 + 1 / k) for A, k in zip(A_lst, k_lst)],
                            wasp_u_mean, 5)
    npt.assert_almost_equal([(pdf(ws) * ws).sum() for pdf in pdf_lst], wasp_u_mean, 5)
    npt.assert_almost_equal((P * WS).sum((0, 2)), wasp_u_mean, 5)

    # air power
    p_air = [(pdf(ws) * 1 / 2 * rho * ws**3).sum() for pdf in pdf_lst]
    npt.assert_array_almost_equal(p_air, wasp_p_air, 3)
    npt.assert_array_almost_equal((P * 1 / 2 * rho * WS**3).sum((0, 2)), wasp_p_air, 2)

    # AEP
    AEP_ilk = AEPCalculator(wake_model=NOJ(site, wt)).calculate_AEP_no_wake_loss(
        x_i=x, y_i=y, h_i=30, wd=np.arange(0, 360, 30), ws=ws)
    if 0:
        import matplotlib.pyplot as plt
        plt.plot(wasp_aep_no_density_correction / 1000, '.-', label='WAsP')
        plt.plot(AEP_ilk.sum((0, 2)) * 1e3, label='PyWake')
        plt.xlabel('Sector')
        plt.ylabel('AEP [MWh]')
        plt.legend()
        plt.show()
    npt.assert_array_less(np.abs(wasp_aep_no_density_correction - AEP_ilk.sum((0, 2)) * 1e6), 300)
    npt.assert_almost_equal(AEP_ilk.sum(), wasp_aep_no_density_correction_total, 3)
def Difussion(self, range):
    ans = []
    for i in range:
        ans.append(2 * (self.q ** 2) / self.h * (self.MFP / self.L) * gamma(self.r + 1)
                   * FermiDiracIntegral.GetValue(self.r - 1, i))
    return ans
def e_areas(speed_wind, windFarmData, curvesData, curvesN, factores, entrance, et, es, b, numAreas):
    import math
    import numpy as np

    plantas = len(windFarmData[0])

    # Energy production of the wind farm
    e_plantas = [[] for x in range(plantas)]
    loss = 0.897  # Losses besides wake: 3% operational, 7.5% aging
    res_n = 0.1
    for p in range(plantas):
        # print("P ", p)
        mes = np.mod(et + 1, 12)
        if mes == 0:
            mes = 11
        else:
            mes = mes - 1
        # Computing the speeds for all turbine rows
        velh = speed_wind[p][1][et][es] * \
            (windFarmData[6][p] / windFarmData[5][p])**windFarmData[7][p]  # Adjusting for height
        v = velh * factores[0][p][mes][b]  # Multiplying by the hourly intensity
        if (v >= windFarmData[2][p] and v <= windFarmData[3][p]):
            vmin = windFarmData[2][p]
            # res = windFarmData[4][p]
            v_round = np.round((v - vmin) / res_n) * res_n + vmin
            speeds = []
            speeds.append(v_round)
            num_hileras = len(curvesData[p][2])
            for i in range(num_hileras - 1):
                idx_v = int((speeds[i] - vmin) / res_n) + 1  # CHECK MAXIMUM SPEED
                if idx_v > 179:
                    idx_v = 179
                if idx_v < 0:
                    idx_v = 0
                f1 = 1 - (1 - curvesN[p][1][idx_v])**0.5
                k = 0.075  # For La Guajira
                f2 = 1 - f1 * (windFarmData[10][p] / (windFarmData[10][p] + 2 * k * windFarmData[9][p]))**2
                # Check indices
                ve = speeds[i] * f2
                nv = np.round((ve - vmin) / res_n) * res_n + vmin
                speeds.append(nv)
            # Computing the production of each turbine
            dias = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
            d = dias[mes]  # Ignoring the extra day in leap years
            e = []
            for i in range(num_hileras):
                k_wei = (factores[1][p][mes][b] / speeds[i])**-1.086
                c_wei = speeds[i] / math.gamma(1 + 1 / k_wei)
                wei = []
                for v in range(len(curvesData[p][3])):
                    wei.append((k_wei / c_wei) * np.exp(-(curvesData[p][3][v] / c_wei)**k_wei)
                               * (curvesData[p][3][v] / c_wei)**(k_wei - 1))
                argint = np.dot(wei, curvesN[p][0])
                e.append(argint * res_n * loss * d / 1000)  # production of one turbine in each row, in MWh
            # multiplying by the number of turbines per row and checking entry
            e_plantas[p] = np.dot(e, curvesData[p][2]) * entrance[p][et]
        else:
            e_plantas[p] = 0

    # Computing totals per area. Check AREA NAME OR NUMBER
    e_areas = [0 for x in range(numAreas)]
    for p in range(plantas):
        area = int(windFarmData[12][p]) - 1
        e_areas[area] = e_areas[area] + e_plantas[p]
    return e_areas
def B(alpha: float, beta: float) -> float:
    """A normalizing constant so that the total probability sums to 1."""
    return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)
def gammaincc(a, x):
    return upper_incomplete_gamma(a, x) / gamma(a)
def __init__(self, alpha):
    self._alpha = np.array(alpha)
    self._coef = gamma(np.sum(self._alpha)) / \
        reduce(mul, [gamma(a) for a in self._alpha])
def gammainc(a, x):
    return lower_incomplete_gamma(a, x) / gamma(a)
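# Hedged sanity check for the two regularized wrappers above (assumes scipy,
# and that upper_incomplete_gamma/lower_incomplete_gamma come from the
# surrounding module): the lower and upper regularized incomplete gamma
# functions must sum to 1, and should match scipy.special.
from scipy import special

a_, x_ = 2.5, 1.7
assert abs(gammainc(a_, x_) + gammaincc(a_, x_) - 1.0) < 1e-10
assert abs(gammainc(a_, x_) - special.gammainc(a_, x_)) < 1e-10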
def V_sph_analytical(dim):
    return math.pi**(dim / 2.0) / math.gamma(dim / 2.0 + 1.0)
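# Hedged check of the unit n-ball volume formula above against the familiar
# low-dimensional cases (pi*R^2 and 4/3*pi*R^3 with R = 1):
import math

assert abs(V_sph_analytical(2) - math.pi) < 1e-12
assert abs(V_sph_analytical(3) - 4.0 / 3.0 * math.pi) < 1e-12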
def train(self):
    pop = [self.create_solution() for _ in range(0, self.pop_size)]
    g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)

    for epoch in range(0, self.epoch):
        # Update the location of Harris' hawks
        for i in range(0, self.pop_size):
            E0 = 2 * uniform() - 1  # -1 < E0 < 1
            E = 2 * E0 * (1 - (epoch + 1) * 1.0 / self.epoch)  # factor to show the decreasing energy of rabbit
            J = 2 * (1 - uniform())

            # -------- Exploration phase Eq. (1) in paper -------------------
            if abs(E) >= 1:
                # Harris' hawks perch randomly based on 2 strategies:
                if uniform() >= 0.5:  # perch based on other family members
                    X_rand = deepcopy(pop[randint(0, self.pop_size)][self.ID_POS])
                    pop[i][self.ID_POS] = X_rand - uniform() * abs(X_rand - 2 * uniform() * pop[i][self.ID_POS])
                else:  # perch on a random tall tree (random site inside group's home range)
                    X_m = mean([x[self.ID_POS] for x in pop])
                    pop[i][self.ID_POS] = (g_best[self.ID_POS] - X_m) - uniform() * \
                        (self.lb + uniform() * (self.ub - self.lb))

            # -------- Exploitation phase -------------------
            else:
                # Attacking the rabbit using 4 strategies regarding the behavior of the rabbit
                # phase 1: ----- surprise pounce (seven kills) ----------
                # surprise pounce (seven kills): multiple, short rapid dives by different hawks
                if uniform() >= 0.5:
                    delta_X = g_best[self.ID_POS] - pop[i][self.ID_POS]
                    if abs(E) >= 0.5:  # Hard besiege Eq. (6) in paper
                        pop[i][self.ID_POS] = delta_X - E * abs(J * g_best[self.ID_POS] - pop[i][self.ID_POS])
                    else:  # Soft besiege Eq. (4) in paper
                        pop[i][self.ID_POS] = g_best[self.ID_POS] - E * abs(delta_X)
                else:
                    xichma = power((gamma(1 + 1.5) * sin(pi * 1.5 / 2.0)) /
                                   (gamma((1 + 1.5) * 1.5 * power(2, (1.5 - 1) / 2)) / 2.0), 1.0 / 1.5)
                    LF_D = 0.01 * uniform() * xichma / power(abs(uniform()), 1.0 / 1.5)
                    if abs(E) >= 0.5:  # Soft besiege Eq. (10) in paper
                        Y = g_best[self.ID_POS] - E * abs(J * g_best[self.ID_POS] - pop[i][self.ID_POS])
                        fit_Y = self.get_fitness_position(Y)
                    else:  # Hard besiege Eq. (11) in paper
                        X_m = mean([x[self.ID_POS] for x in pop])
                        Y = g_best[self.ID_POS] - E * abs(J * g_best[self.ID_POS] - X_m)
                        fit_Y = self.get_fitness_position(Y)
                    Z = Y + uniform(self.lb, self.ub) * LF_D
                    fit_Z = self.get_fitness_position(Z)
                    if fit_Y < pop[i][self.ID_FIT]:
                        pop[i] = [Y, fit_Y]
                    if fit_Z < pop[i][self.ID_FIT]:
                        pop[i] = [Z, fit_Z]

            # batch size idea
            if i % self.batch_size:
                g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)

        self.loss_train.append(g_best[self.ID_FIT])
        if self.verbose:
            print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))

    self.solution = g_best
    return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
def t(x, mu, lambda_, ny):
    """
    Return the probability density of x under a t-distribution with mean mu,
    precision lambda_, and ny degrees of freedom.
    """
    return gamma(ny/2 + 1/2) / gamma(ny/2) * (lambda_/np.pi/ny)**0.5 \
        * (1 + lambda_*(x - mu)**2/ny)**(-ny/2 - 1/2)
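# Hedged cross-check (assumes scipy): with precision lambda_, the equivalent
# scipy.stats.t distribution has scale 1/sqrt(lambda_).
import numpy as np
from math import gamma
from scipy.stats import t as student_t

x_, mu_, lambda_, ny_ = 0.7, 0.2, 4.0, 5.0
assert abs(t(x_, mu_, lambda_, ny_)
           - student_t.pdf(x_, df=ny_, loc=mu_, scale=1/np.sqrt(lambda_))) < 1e-9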
def gen_gamma_fun_pdf(xvals, alpha, beta, m):
    pdf_vals = (m * e ** (-(xvals / beta) ** m)) /\
        (xvals * math.gamma(alpha / m)) *\
        (xvals / beta) ** alpha
    return pdf_vals
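# Hedged numerical check: the generalized gamma pdf above should integrate to
# one over (0, inf). Plain trapezoid sketch; `e` is assumed to be the module-
# level constant the snippet relies on (e.g. math.e), and the parameter values
# are illustrative.
import math
import numpy as np

e = math.e
xs = np.linspace(1e-6, 60, 200001)
total = np.trapz(gen_gamma_fun_pdf(xs, alpha=2.0, beta=3.0, m=1.5), xs)
assert abs(total - 1.0) < 1e-3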