def mean_from_output(output):
    """Given output of the form [params, ll], return the mean parameter vector."""
    param_vectors, lls = transpose(output)
    z = sum(mpmath.exp(ll) for ll in lls)
    return map(sum, transpose([[(mpmath.exp(ll) / z) * p for p in param_vector]
                               for param_vector, ll in output]))
def integrate(self, a, b):
    halfk = self.halfforceconstant
    x0 = self.reference
    beta = self.beta
    avalue = exp(-beta * halfk * (a - x0)**2)
    bvalue = exp(-beta * halfk * (b - x0)**2)
    # Single-interval trapezoidal approximation of the integral from a to b.
    return 0.5 * (b - a) * (avalue + bvalue)
def sigma(self, z):
    # A+S 18.10.
    from mpmath import pi, jtheta, exp, mpc, sqrt, sin
    Delta = self.Delta
    e1, _, _ = self.__roots
    om = self.__periods[0] / 2
    omp = self.__periods[1] / 2
    if self.__ng3:
        z = mpc(0, 1) * z
    if Delta > 0:
        tau = omp / om
        q = (exp(mpc(0, 1) * pi() * tau)).real
        eta = -(pi()**2 * jtheta(n=1, z=0, q=q, derivative=3)) \
            / (12 * om * jtheta(n=1, z=0, q=q, derivative=1))
        v = (pi() * z) / (2 * om)
        retval = (2 * om) / pi() * exp((eta * z**2) / (2 * om)) \
            * jtheta(n=1, z=v, q=q) / jtheta(n=1, z=0, q=q, derivative=1)
    elif Delta < 0:
        om2 = om + omp
        om2p = omp - om
        tau2 = om2p / (2 * om2)
        q = mpc(0, (mpc(0, 1) * exp(mpc(0, 1) * pi() * tau2)).imag)
        eta2 = -(pi()**2 * jtheta(n=1, z=0, q=q, derivative=3)) \
            / (12 * om2 * jtheta(n=1, z=0, q=q, derivative=1))
        v = (pi() * z) / (2 * om2)
        retval = (2 * om2) / pi() * exp((eta2 * z**2) / (2 * om2)) \
            * jtheta(n=1, z=v, q=q) / jtheta(n=1, z=0, q=q, derivative=1)
    else:
        g2, g3 = self.__invariants
        if g2 == 0 and g3 == 0:
            retval = z
        else:
            c = e1 / 2
            A = sqrt(3 * c)
            retval = (1 / A) * sin(A * z) * exp((c * z**2) / 2)
    if self.__ng3:
        return mpc(0, -1) * retval
    else:
        return retval
def fermihalf(x, sgn):
    """ Series approximation to the F_{1/2}(x) or F_{-1/2}(x)
        Fermi-Dirac integral """

    f = lambda k: mp.sqrt(x ** 2 + np.pi ** 2 * (2 * k - 1) ** 2)

    # if x < -100:
    #     return 0.0
    if x < -9:
        # Both F_{1/2} and F_{-1/2} tend to exp(x) in this regime.
        # (The original had a stray debug condition `or True` here that made
        # the series below unreachable.)
        return mp.exp(x)

    if sgn > 0:  # F_{1/2}(x)
        a = np.array((1.0 / 770751818298, -1.0 / 3574503105, -13.0 / 184757992,
                      85.0 / 3603084, 3923.0 / 220484, 74141.0 / 8289,
                      -5990294.0 / 7995))
        g = lambda k: mp.sqrt(f(k) - x)
    else:  # F_{-1/2}(x)
        a = np.array((-1.0 / 128458636383, -1.0 / 714900621, -1.0 / 3553038,
                      27.0 / 381503, 3923.0 / 110242, 8220.0 / 919))
        g = lambda k: -0.5 * mp.sqrt(f(k) - x) / f(k)

    F = np.polyval(a, x) + 2 * np.sqrt(2 * np.pi) * sum(map(g, range(1, 21)))
    return F
def dedekind(tau, floatpre):
    """
    Algorithm 22 (Dedekind eta)
    Input : tau in the upper half-plane, k in N
    Output : eta(tau)
    """
    a = 2 * mpmath.pi / mpmath.mpf(24)
    b = mpmath.exp(mpmath.mpc(0, a))
    p = 1
    m = 0
    while m <= 0.999:
        n = nearest_integer(tau.real)
        if n != 0:
            tau -= n
            p *= b**n
        m = tau.real * tau.real + tau.imag * tau.imag
        if m <= 0.999:
            ro = mpmath.sqrt(mpmath.power(tau, -1) * 1j)
            if ro.real < 0:
                ro = -ro
            p = p * ro
            # tau -> -1/tau = (-Re(tau) + Im(tau)*1j) / |tau|**2
            # (the original used p here instead of tau, which does not match
            # the modular transformation)
            tau = (-tau.real + tau.imag * 1j) / m
    q1 = mpmath.exp(a * tau * 1j)
    q = q1**24
    s = 1
    qs = mpmath.mpc(1, 0)
    qn = 1
    des = mpmath.mpf(10)**(-floatpre)
    while abs(qs) > des:
        t = -q * qn * qn * qs
        qn = qn * q
        qs = qn * t
        s += t + qs
    return p * q1 * s
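# Usage sketch for dedekind (a hypothetical nearest_integer helper is supplied
# here; the original module defines its own). The classical special value
# eta(i) = Gamma(1/4) / (2 * pi**(3/4)) makes a convenient spot check.
import mpmath

nearest_integer = lambda x: int(mpmath.floor(x + mpmath.mpf('0.5')))

val = dedekind(mpmath.mpc(0, 1), 15)
ref = mpmath.gamma(mpmath.mpf(1) / 4) / (2 * mpmath.pi ** mpmath.mpf('0.75'))
assert abs(val - ref) < mpmath.mpf('1e-12')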
def genenergies(fnR, fnQ, seqsR, seqsQ, gamma, sQ, sR, R0):
    # Parses seqs and model type then calculates and returns energies.
    # R is the transcription factor, Q is RNAP.
    ematR = np.genfromtxt(fnR, skiprows=1)
    ematQ = np.genfromtxt(fnQ, skiprows=1)
    fR = open(fnR)
    fQ = open(fnQ)
    mattype = fR.read()[:6]  # mattype must be the same
    # mattypeQ = fQ.read()[:6]
    energies = np.zeros(len(seqsQ))
    N = len(seqsQ)
    mut_region_lengthQ = len(seqsQ[0])
    mut_region_lengthR = len(seqsR[0])

    if mattype == '1Point':
        for i, s in enumerate(seqsR):
            seq_matR = seq2mat(s)
            seq_matQ = seq2mat(seqsQ[i])
            RNAP = (seq_matQ * ematQ).sum() * sQ
            TF = (seq_matR * ematR).sum() * sR + R0
            energies[i] = (-RNAP + mp.log(1 + mp.exp(-TF - gamma))
                           - mp.log(1 + mp.exp(-TF)))
    '''
    elif mattype == '2Point':
        for i, s in enumerate(seqs):
            seq_mat = np.zeros(round(sp.misc.comb(mut_region_length, 2)) * 16)
            seq_mat[seq2mat2(s)] = 1
            energies[i] = (seq_mat * (emat.ravel())).sum()
    elif mattype == '3Point':
        for i, s in enumerate(seqs):
            seq_mat = np.zeros(round(sp.misc.comb(mut_region_length, 3)) * 64)
            seq_mat[seq2mat3(s)] = 1
            energies[i] = (seq_mat * (emat.ravel())).sum()
    '''
    return energies
def test_stoch_eig_high_prec():
    n = 1e-100
    with mp.workdps(100):
        P = mp.matrix([[1 - 3*(mp.exp(n) - 1), 3*(mp.exp(n) - 1)],
                       [mp.exp(n) - 1,         1 - (mp.exp(n) - 1)]])
        run_stoch_eig(P, verbose=VERBOSE)
def dfdy(y, x, b, c):
    global FT, XO, XT, XF
    ft = FT
    v = x[0]
    i = y[0]
    iss = IS = b[0]
    n = N = b[1]
    ikf = IKF = b[2]
    isr = ISR = b[3]
    nr = NR = b[4]
    vj = VJ = b[5]
    m = M = b[6]
    rs = RS = b[7]
    # XO, XT and XF are module-level mpf constants standing in for the
    # literals 1, 2 and 0.005 of the original commented-out expressions.
    fh = iss**XT * rs * mpm.sqrt(ikf / (ikf + iss * (mpm.exp((-i*rs + v) / (ft*n)) - XO))) \
        * (mpm.exp((-i*rs + v) / (ft*n)) - XO) * mpm.exp((-i*rs + v) / (ft*n)) \
        / (XT * ft * n * (ikf + iss * (mpm.exp((-i*rs + v) / (ft*n)) - XO))) \
        - iss * rs * mpm.sqrt(ikf / (ikf + iss * (mpm.exp((-i*rs + v) / (ft*n)) - XO))) \
        * mpm.exp((-i*rs + v) / (ft*n)) / (ft*n)
    sh = isr * m * rs * (XO - (-i*rs + v) / vj) \
        * ((XO - (-i*rs + v) / vj)**XT + XF)**(m/XT) \
        * (mpm.exp((-i*rs + v) / (ft*nr)) - XO) \
        / (vj * ((XO - (-i*rs + v) / vj)**XT + XF)) \
        - isr * rs * ((XO - (-i*rs + v) / vj)**XT + XF)**(m/XT) \
        * mpm.exp((-i*rs + v) / (ft*nr)) / (ft*nr)
    return mpm.matrix([[fh + sh]])
def test_gth_solve_high_prec():
    n = 1e-100
    with mp.workdps(100):
        P = mp.matrix([[-3*(mp.exp(n) - 1), 3*(mp.exp(n) - 1)],
                       [mp.exp(n) - 1,      -(mp.exp(n) - 1)]])
        run_gth_solve(P, verbose=VERBOSE)
def bimax_integrand(self, z, wc, l, n, t):
    """Integrand of electron-noise integral."""
    return f1(wc*l/z/mp.sqrt(2)) * z * \
        (mp.exp(-z**2) + n/mp.sqrt(t)*mp.exp(-z**2 / t)) / \
        (mp.fabs(BiMax.d_l(z, wc, n, t))**2 * wc**2)
def DedekindEtaA4(tau):
    '''Compute the Dedekind eta function for imaginary argument tau,
    numerically, via the theta-product identity
    eta^3 = theta2 * theta3 * theta4 / 2.'''

    try:
        import mpmath as mp
        mpmath_loaded = True
    except ImportError:
        mpmath_loaded = False

    return mp.cbrt(0.5 * mp.jtheta(2, 0, mp.exp(-mp.pi * tau))
                   * mp.jtheta(3, 0, mp.exp(-mp.pi * tau))
                   * mp.jtheta(4, 0, mp.exp(-mp.pi * tau)))
def fl1(x0, seta2, seff2):
    coeff = 1. / np.sqrt(np.pi)
    xm = x0 / np.sqrt(2. * seta2)
    tau = lamda1 * seff2 / np.sqrt(2. * seta2)
    intgr1 = coeff * exp(-(tau - xm)**2) - (tau - xm) * erfc(tau - xm)
    intgr2 = coeff * exp(-(tau + xm)**2) - (tau + xm) * erfc(tau + xm)
    return .5 * (np.sqrt(2. * seta2) / (1. + lamda2 * seff2)) * (intgr1 + intgr2)
def DedekindEtaA2(tau):
    '''Compute the Dedekind eta function for imaginary argument tau.
    Numerically.'''

    try:
        import mpmath as mp
        mpmath_loaded = True
    except ImportError:
        mpmath_loaded = False

    return mp.exp(-mp.pi / 12.0) * mp.jtheta(3, mp.pi * (mp.j * tau + 1.0) / 2.0,
                                             mp.exp(-3.0 * mp.pi))
def fl2(x0, seta2, seff2):
    coeff = 1. / np.sqrt(np.pi)
    xm = x0 / np.sqrt(2. * seta2)
    tau = lamda1 * seff2 / np.sqrt(2. * seta2)
    intgr1 = -coeff*(tau - xm)*exp(-(tau - xm)**2) + (.5 + (tau - xm)**2)*erfc(tau - xm)
    intgr2 = -coeff*(tau + xm)*exp(-(tau + xm)**2) + (.5 + (tau + xm)**2)*erfc(tau + xm)
    return (seta2 / (1. + lamda2 * seff2)**2) * (intgr1 + intgr2)
def temp_t(t, x=mpf(1), Q=mpf(1), A=mpf(1), Ti=mpf(10)):
    u'''
    Definition of the derivative of temperature with respect to time,
    temp_t = T_t(t, x)
    '''
    t = mpf(t)
    termo1 = sqrt(A / (t * pi)) * exp(-x**2 / (t * 4 * A))
    termo2 = sqrt(A / (t * pi)) * (x**2 / (t**2 * 2 * A)) * exp(-x**2 / (t * 4 * A))
    termo3 = (x**2 / (sqrt(t**3 * A) * 4)) * erfc_z(x / (mpf(2) * sqrt(t * A)))
    return Q * (termo1 + termo2 + termo3)
def sph_i2n_exact(n, z):
    """Return the value of i^{(2)}_n computed using the exact formula.

    The expression used is http://dlmf.nist.gov/10.49.E10 .
    """
    zm = mpmathify(z)
    s1 = sum(mpc(-1, 0)**k * _a(k, n)/zm**(k+1) for k in xrange(n+1))
    s2 = sum(_a(k, n)/zm**(k+1) for k in xrange(n+1))
    return exp(zm)/2 * s1 + mpc(-1, 0)**n * exp(-zm)/2 * s2
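# A quick reduction check (a sketch): for n = 0 the DLMF sum has the single
# term a_0(1/2) = 1, so the formula above collapses to
# i^{(2)}_0(z) = (e^z + e^{-z}) / (2 z) = cosh(z) / z.
import mpmath
z = mpmath.mpf('2.5')
n0_formula = (mpmath.exp(z) + mpmath.exp(-z)) / (2 * z)
assert mpmath.almosteq(n0_formula, mpmath.cosh(z) / z)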
def gaussian_total(self, offset):
    factor = mpmath.sqrt(1.0 / (self._period * self._tau))
    factor_exponent = -1.0 / (2 * self._period)
    exponent_factor = mpmath.mpf(offset) * offset
    exponent_interval = self._tau * self._tau
    exponent_offset = 2 * self._tau * offset
    factor_full = factor * mpmath.exp(exponent_factor * factor_exponent)
    q = mpmath.exp(factor_exponent * exponent_interval)
    z = factor_exponent * exponent_offset / (2 * mpmath.j)
    theta = mpmath.jtheta(3, z, q).real
    return factor_full * theta
def sum_gaussian_theta(variance, offset, interval):
    factor = mpmath.sqrt(1.0 / (variance * tau))
    factor_exponent = mpmath.mpf(-1.0) / (2 * variance)
    exponent_factor = mpmath.mpf(offset) * offset
    factor_full = factor * mpmath.exp(exponent_factor * factor_exponent)
    exponent_interval = interval * interval
    exponent_offset = 2 * interval * offset
    q = mpmath.exp(factor_exponent * exponent_interval)
    z = factor_exponent * exponent_offset / (2 * mpmath.j)
    theta = mpmath.jtheta(3, z, q)
    return factor_full * theta
def fq(x0, seta2, seff2):
    coeff = 1. / np.sqrt(np.pi)
    xm = x0 / np.sqrt(2. * seta2)
    tau = lamda1 * seff2 / np.sqrt(2. * seta2)
    psi = lamda2 * seff2 * x0 / np.sqrt(2. * seta2)
    intgr1 = coeff*(-tau + 2*psi + xm)*exp(-(tau + xm)**2) \
        + .5*(1. + 2.*(tau - psi)**2)*erfc(tau + xm)
    intgr2 = coeff*(-tau - 2*psi - xm)*exp(-(tau - xm)**2) \
        + .5*(1. + 2.*(tau + psi)**2)*erfc(tau - xm)
    intgr3 = .5 * x0**2 * (erf(tau + xm) + erf(tau - xm))
    return (seta2 / (1. + lamda2 * seff2)**2) * (intgr1 + intgr2) + intgr3
def energy(self, clustering):
    energy = mpmath.mpf(0.0)
    new_vertex_distributions = _combine_vertex_distributions_given_clustering(
        self.vertex_distributions, clustering)

    # likelihood
    likelihood_energy = -self._log_likelihood(clustering, new_vertex_distributions)

    # prior on similarity:
    # We prefer the cluster whose minimum similarity is large.
    # - the similarity of a pair of vertexes is measured by the similarity
    #   of top 10 words in the distribution. (measure each word type
    #   respectively and take average)
    intra_cluster_energy = mpmath.mpf(0.0)
    for cluster_id, cluster_vertex_set in enumerate(clustering):
        min_similarity_within_cluster = self._min_similarity_within_cluster(
            cluster_vertex_set, new_vertex_distributions[cluster_id])
        intra_cluster_energy += -mpmath.log(
            mpmath.exp(min_similarity_within_cluster - 1))

    # Between-cluster similarity:
    # - For each pair of clusters, we want to find the pair of words with
    #   maximum similarity and prefer this similarity value to be small.
    inter_cluster_energy = mpmath.mpf(0.0)
    if len(clustering) > 1:
        for i in range(0, len(clustering) - 1):
            for j in range(i + 1, len(clustering)):
                max_similarity_between_clusters = self._max_similarity_between_clusters(
                    clustering[i], clustering[j])
                inter_cluster_energy += -mpmath.log(
                    mpmath.exp(-max_similarity_between_clusters))

    # prior on clustering complexity: prefer small number of clusters.
    length_energy = -mpmath.log(mpmath.exp(-len(clustering)))

    # classification: prefer small number of categories.
    class_energy = 0.0
    if self._classifier is not None:
        num_classes = self._calculate_num_of_categories(clustering,
                                                        new_vertex_distributions)
        class_energy = -mpmath.log(mpmath.exp(-(abs(num_classes - len(clustering)))))

    # classification confidence: maximize the classification confidence
    confidence_energy = 0.0
    for cluster_id, cluster_vertex_set in enumerate(clustering):
        (category, confidence) = self._predict_label(new_vertex_distributions[cluster_id])
        confidence_energy += -mpmath.log(confidence)

    energy += (0.5)*likelihood_energy + intra_cluster_energy + inter_cluster_energy \
        + 30.0*length_energy + 20.0*class_energy + confidence_energy
    logging.debug('ENERGY: {0:12.6f}\t{1:12.6f}\t{2:12.6f}\t{3:12.6f}\t{4:12.6f}\t{5:12.6f}'.format(
        likelihood_energy.__float__(), intra_cluster_energy.__float__(),
        inter_cluster_energy.__float__(), length_energy.__float__(),
        class_energy.__float__(), confidence_energy.__float__()))
    return energy
def test_talbot():
    """test for Talbot numerical inverse Laplace with mpmath"""
    a = Talbot(f=f1, n=24, shift=0.0, dps=None)
    # t = 0 raises an error:
    assert_raises(ValueError, a, 0)
    # single value of t:
    ok_(mpallclose(a(1), mpmath.exp(mpmath.mpf('-1.0'))))
    # 2 values of t:
    ans = np.array([mpmath.exp(mpmath.mpf('-1.0')),
                    mpmath.exp(mpmath.mpf('-2.0'))])
    ok_(mpallclose(a([1, 2]), ans))
def DispersionRelation(w):
    def Disp(w, kp, b, eta):
        zeta = w / kp
        Z = Z_PDF(zeta)
        kyp = ky / kp
        return - (1. - eta/2. * (1. + b)) * kyp * Z \
               - eta * kyp * (zeta + zeta**2 * Z) + zeta * Z + 1.

    # proton
    sum1MG0 = lambda_D2 * b + (1. - G0) + (1. - G0/m_ie)

    # return -mp.exp(-b) * Disp(w, kp, b, eta) + mp.exp(-b/m_ie) \
    #     * Disp(w, kp * mp.sqrt(m_ie), b/m_ie, eta*mp.sqrt(m_ie)) + sum1MG0
    return mp.exp(-b) * Disp(w, kp, b, eta) \
        - mp.exp(-b/m_ie) * Disp(w, kp * mp.sqrt(m_ie), b/m_ie, eta) + sum1MG0
def GetAnalyticalWaitingtimes(kon, koff, ksyn):
    """Get analytical waiting times."""
    import mpmath
    mpmath.mp.pretty = True
    A = mpmath.sqrt(-4*ksyn*kon + (koff + kon + ksyn)**2)
    x = []
    for i in np.linspace(-20, 5, 5000):
        x.append(mpmath.exp(i))
    y = []
    for t in x:
        B = koff + ksyn - (mpmath.exp(t*A)*(koff + ksyn - kon)) - kon + A \
            + mpmath.exp(t*A)*A
        p01diff = mpmath.exp(-0.5*t*(koff + kon + ksyn + A)) * B / (2.0*A)
        y.append(p01diff * ksyn)
    return (x, y)
def step(array):
    global _INFILE
    global _BETA
    global _NSTEP
    beta = _BETA
    old_positions, Ntides, is_3prime = array
    internal = Aptamer("leaprc.ff12SB", _INFILE)
    identifier = Ntides.replace(" ", "")
    internal.sequence(identifier, Ntides.strip())
    internal.unify(identifier)
    internal.command("saveamberparm union %s.prmtop %s.inpcrd" % (identifier, identifier))
    time.sleep(2)
    # print("WhereamI?")
    print("Identifier: " + Ntides)
    volume = (2*math.pi)**5
    aptamer_top = app.AmberPrmtopFile("%s.prmtop" % identifier)
    aptamer_crd = app.AmberInpcrdFile("%s.inpcrd" % identifier)
    # print("loaded")
    # if is_3prime == 1:
    en_pos = [mcmc_sample(aptamer_top, aptamer_crd, old_positions, index, Nsteps=_NSTEP)
              for index in range(10)]
    # else:
    #     en_pos_task = [mcmc_sample_five(aptamer_top, aptamer_crd, old_positions,
    #                                     index, Nsteps=200) for index in range(20)]
    #     barrier()
    #     en_pos = value(en_pos_task)
    en = []
    positions = []
    positions_s = []
    for elem in en_pos:
        en += elem[0]
        # print(elem[2], elem[1])
        positions_s.append([elem[2], elem[1]])
    positions = min(positions_s)[1]
    fil = open("best_structure%s.pdb" % Ntides, "w")
    app.PDBFile.writeModel(aptamer_top.topology, positions, file=fil)
    fil.close()
    del fil
    Z = volume * math.fsum([math.exp(-beta*elem) for elem in en]) / len(en)
    P = [math.exp(-beta*elem)/Z for elem in en]
    S = volume * math.fsum([-elem*math.log(elem*volume) for elem in P]) / len(P)
    print("%s : %s" % (Ntides, S))
    return positions, Ntides, S
def rate_cov_strate_sleeping_v3_uniform(k_matrix, alpha, rate_th1, lamda_u1, bw):
    # define the distribution of the activity
    first_int = (1/3)            # corresponds to the integral in the first term
    second_int = (1/2) - (1/3)   # corresponds to the integral in the second term
    # ---------------------------- calibrated --------------------------------
    expected_activity = (1/2)            # expected value of a; this changes for
                                         # optimization hence should be calibratable
    expected_strategic_function = (1/2)  # expected value of s
    # ---------------------------- calibrated --------------------------------
    noise_var = 1

    # preprocessing - define empty elements and get information about the inputs
    k_mat = k_matrix.copy()
    num_tiers = k_mat.shape[0]
    density_org = k_mat[:, 2].copy()
    power = gb.db2pow(k_mat[:, 1])
    density_update = np.array(density_org * ([1]*(num_tiers - 1)
                                             + [expected_strategic_function]))

    # define necessary values
    area_org = np.zeros(num_tiers, float)        # original association probability
    area_sc_update = np.zeros(num_tiers, float)  # association probability of disconnected cell
    N_k_u1 = np.zeros(num_tiers, float)          # number of users in tier-K BS
    N_k_sc = np.zeros(num_tiers, float)          # number of users in tier-K BS
    N_k_total = np.zeros(num_tiers, float)
    threshold_u1 = np.zeros(num_tiers, float)    # threshold for users in tier-K BS
    t_func_main = np.zeros(num_tiers, float)
    t_func_sc = np.zeros(num_tiers, float)

    for i in range(num_tiers):
        area_org[i] = A_k(density_org, power, alpha, i)
        area_sc_update[i] = A_k(density_update, power, alpha, i)

    N_k_u1 = 1 + 1.28*lamda_u1*(area_org/density_org)
    N_k_sc = (expected_activity*(1 - expected_strategic_function)*density_org[-1]
              * N_k_u1[-1]*(area_sc_update/density_update))  # for binary optimization
    N_k_total = N_k_u1 + N_k_sc
    threshold_u1 = 2**((rate_th1/bw)*N_k_total) - 1

    for i in range(num_tiers):
        first_exp_term = -(threshold_u1[i]*noise_var/power[i])
        z_term = 0 if (threshold_u1[i] == 0) else (threshold_u1[i])**(2/alpha) \
            * mp.quad(lambda u: 1/(1 + u**(alpha/2)),
                      [(1/threshold_u1[i])**(2/alpha), mp.inf])
        second_exp_term = -mp.pi*z_term*sum(density_update*(power/power[i])**(2/alpha))
        third_exp_term_main = -mp.pi*sum(density_org*(power/power[i])**(2/alpha))
        third_exp_term_sc = -mp.pi*sum(density_update*(power/power[i])**(2/alpha))
        t_func_main[i] = mp.quad(lambda y: y * mp.exp(first_exp_term*y**alpha)
                                 * mp.exp(second_exp_term*y**2)
                                 * mp.exp(third_exp_term_main*y**2), [0, mp.inf])
        t_func_sc[i] = mp.quad(lambda y: y * mp.exp(first_exp_term*y**alpha)
                               * mp.exp(second_exp_term*y**2)
                               * mp.exp(third_exp_term_sc*y**2), [0, mp.inf])

    temp_second_sum = sum(2*mp.pi*density_update*t_func_sc)
    temp_third_sum = sum(2*mp.pi*density_org[0:-1]*t_func_main[0:-1])
    # rate_coverage = (2*mp.pi*density_org[-1]/expected_activity) \
    #     * (t_func_main[-1]*first_int + temp_second_sum*second_int) + temp_third_sum
    rate_coverage = (area_org[-1]/expected_activity) \
        * ((2*mp.pi*density_org[-1]/area_org[-1])*t_func_main[-1]*first_int
           + temp_second_sum*second_int) + temp_third_sum
    return rate_coverage
def ThermionicEmissionCurrent(self, Va, phi_bn, debug=False):
    kT = to_numeric(k * self.T)
    q_n = to_numeric(q)
    A = self.Area
    Rs = self.Rs
    if self.Semiconductor.dop_type == 'n':
        Ar = self.Semiconductor.reference['A_R_coeff_n'] * constants['A_R']
    else:
        Ar = self.Semiconductor.reference['A_R_coeff_p'] * constants['A_R']
    Js = Ar * (self.T ** 2) * mp.exp(-q_n * phi_bn / kT)
    if debug:
        print 'Js, Is =', Js, A * Js
    J = -Js + kT / (q_n * A * Rs) * mp.lambertw(
        (q_n * A * Rs * Js / kT) * mp.exp(q_n * (Va + A * Js * Rs) / kT))
    if debug:
        print 'J, I =', J, A * J
    Vd = Va - A * J * Rs
    return np.float(Vd), np.float(J)
def cost(q, alpha):
    b = alpha * sum(q)
    if b == 0:
        return 0
    mx = max(q)
    # Shift by the maximum before exponentiating (log-sum-exp trick) so the
    # exponentials cannot overflow.
    a = sum(exp((x - mx) / b) for x in q)
    return mx + b * log(a)
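# The function above is the log-sum-exp form of a liquidity-sensitive
# LMSR-style cost C(q) = max(q) + b * log(sum_i exp((q_i - max(q)) / b))
# with b = alpha * sum(q). A usage sketch (assumes the module-level exp/log
# come from mpmath):
from mpmath import exp, log

q = [10.0, 5.0]   # outstanding shares per outcome
alpha = 0.05      # liquidity parameter, so b = alpha * sum(q)
price_paid = cost([11.0, 5.0], alpha) - cost(q, alpha)  # one more share of outcome 0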
def polyfit_erfc(nroots, x, low):
    t = x
    if t > 19682.99:
        t = 19682.99
    if t > 1.0:
        tt = mpmath.log(t) / mpmath.log(3) + 1.0  # log3(t) + 1
    else:
        tt = mpmath.sqrt(t)
    it = int(tt)
    tt = tt - it
    tt = 2.0 * tt - 1.0     # map [0, 1] to [-1, 1]
    u = low * 2 - 1         # map [0, 1] to [-1, 1]
    tab_rs, tab_ws = tabulate_erfc(nroots, it)
    im = clenshaw_d1(tab_rs.astype(float), u, nroots)
    rr = clenshaw_d1(im, tt, nroots)
    rr = [r / (1 - r) for r in rr]
    im = clenshaw_d1(tab_ws.astype(float), u, nroots)
    ww = clenshaw_d1(im, tt, nroots)
    if x * low**2 < DECIMALS * .7:
        factor = mpmath.exp(-x * low**2)
        ww = [w * factor for w in ww]
    return rr, ww
def calc_model_evidence(self):
    vval = 0
    mp.mp.dps = 50
    for action in range(self.hparams.num_actions):
        # Accumulate the evidence in log space: the direct product of
        # gamma() and determinant factors overflows for large a.
        val = mp.mpf(mp.fmul(mp.fneg(mp.log(mp.fmul(2.0, mp.pi))),
                             mp.fsub(self.a[action], self.a0)))
        val += mp.loggamma(self.a[action])
        val -= mp.loggamma(self.a0)
        val += 0.5 * mp.log(np.linalg.det(self.lambda_prior
                                          * np.eye(self.hparams.context_dim + 1)))
        val -= 0.5 * mp.log(np.linalg.det(self.precision[action]))
        val += mp.fmul(self.a0, mp.log(self.b0))
        val -= mp.fmul(self.a[action], mp.log(self.b[action]))
        vval += mp.exp(val)
    vval /= float(self.hparams.num_actions)
    return vval
def PiP0(self, gamma):
    U = 4 * self.theta_f * self.Lf / (2. * self.NN)
    R = 2 * self.Lf * self.rho / (2. * self.NN)
    return self.GammaDist(gamma) * mpmath.exp(
        -(self.GammaDist(gamma) * U / (2. * self.NN))
        / (gamma / (self.NN + 0.) + R / (2. * self.NN)))
def integrand(self, eta, l, p, n, kind="A"): if kind not in ("A", "B", "C", "D", "E"): raise NotImplementedError("Integrand types supported \ go only from A to E") if kind == "C": return (mpmath.sqrt(self.k ** 2 - eta ** 2) / eta \ * self.integrand(eta, l, p, n, kind="A")) if kind == "D": return (mpmath.sqrt(self.k ** 2 - eta ** 2) / eta \ * self.integrand(eta, l, p, n, kind="B")) pm = 1 exponent = n - 1 if kind == "B": pm = -1 elif kind == "E": pm = 0 exponent = n if p == 0: lgr_factor = 1 else: lgr_factor = (self.a(eta) ** 2 \ * mpmath.laguerre(p - 1, l, self.a(eta) ** 2)) kz = mpmath.sqrt(self.k**2 - eta**2) return ( mpmath.power(eta, np.abs(l) + 1) / mpmath.sqrt(kz) \ * mpmath.exp(-self.a(eta) ** 2 / 2) * (1 + pm * kz / self.k) \ * mpmath.power(eta / self.k, exponent) * lgr_factor)
def fmt1(t, m, low=None):
    #
    # F[m] = int u^{2m} e^{-t u^2} du
    #      = 1/(2m+1) int e^{-t u^2} d u^{2m+1}
    #      = 1/(2m+1) [e^{-t u^2} u^{2m+1}]_0^1 + (2t)/(2m+1) int u^{2m+2} e^{-t u^2} du
    #      = 1/(2m+1) e^{-t} + (2t)/(2m+1) F[m+1]
    #      = 1/(2m+1) e^{-t} + (2t)/(2m+1)(2m+3) e^{-t} + (2t)^2/(2m+1)(2m+3) F[m+2]
    #
    half = mpmath.mpf('.5')
    b = m + half
    e = half * mpmath.exp(-t)
    x = e
    s = e
    bi = b + 1
    while x > .1**DECIMALS:
        x *= t / bi
        s += x
        bi += 1
    f = s / b
    out = [f]
    for i in range(m):
        b -= 1
        f = (e + t * f) / b
        out.append(f)
    return np.array(out)[::-1]
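# Sanity-check sketch for fmt1 (assumes mpmath/np and the module's DECIMALS
# constant are in scope): the m = 0 Boys function has the closed form
# F_0(t) = sqrt(pi / (4 t)) * erf(sqrt(t)), which the series should reproduce.
t = mpmath.mpf('0.3')
closed_form = mpmath.sqrt(mpmath.pi / (4 * t)) * mpmath.erf(mpmath.sqrt(t))
assert mpmath.almosteq(fmt1(t, 0)[0], closed_form, rel_eps=mpmath.mpf('1e-15'))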
def fmt2_erfc(t, m, low=0):
    half = mpmath.mpf('.5')
    tt = mpmath.sqrt(t)
    low = mpmath.mpf(low)
    low2 = low * low
    f = mpmath.sqrt(mpmath.pi) / 2 / tt * (mpmath.erf(tt) - mpmath.erf(low * tt))
    e = mpmath.exp(-t)
    e1 = mpmath.exp(-t * low2) * low
    b = half / t
    out = [f]
    for i in range(m):
        f = b * ((2 * i + 1) * f - e + e1)
        e1 *= low2
        out.append(f)
    return np.array(out)
def mobility(self, z=1000, E=0, T=300, pn=None):
    if pn is None:
        Eg = self.band_gap(T, symbolic=False, electron_volts=False)
        # print Eg, self.__to_numeric(-Eg/(k*T)), mp.exp(self.__to_numeric(-Eg/(k*T)))
        pn = self.Nc(T, symbolic=False) * self.Nv(T, symbolic=False) * mp.exp(
            self.__to_numeric(-Eg / (k * T))) * 1e-12
        # print pn
    N = 0
    for dopant in self.dopants:
        N += dopant.concentration(z)
    N *= 1e-6
    # print N
    mobility = {'mobility_e': {'mu_L': 0, 'mu_I': 0, 'mu_ccs': 0, 'mu_tot': 0},
                'mobility_h': {'mu_L': 0, 'mu_I': 0, 'mu_ccs': 0, 'mu_tot': 0}}
    for key in mobility.keys():
        mu_L = self.reference[key]['mu_L0'] * (T / 300.0) ** (-self.reference[key]['alpha'])
        mu_I = (self.reference[key]['A'] * (T ** (3 / 2)) / N) / (
            mp.log(1 + self.reference[key]['B'] * (T ** 2) / N)
            - self.reference[key]['B'] * (T ** 2)
            / (self.reference[key]['B'] * (T ** 2) + N))
        try:
            mu_ccs = (2e17 * (T ** (3 / 2)) / mp.sqrt(pn)) / (
                mp.log(1 + 8.28e8 * (T ** 2) * (pn ** (-1 / 3))))
            X = mp.sqrt(6 * mu_L * (mu_I + mu_ccs) / (mu_I * mu_ccs))
        except:
            mu_ccs = np.nan
            X = 0
        # print X
        mu_tot = mu_L * (1.025 / (1 + ((X / 1.68) ** (1.43))) - 0.025)
        Field_coeff = (1 + (mu_tot * E * 1e-2 / self.reference[key]['v_s'])
                       ** self.reference[key]['beta']) ** (-1 / self.reference[key]['beta'])
        mobility[key]['mu_L'] = mu_L * 1e-4
        mobility[key]['mu_I'] = mu_I * 1e-4
        mobility[key]['mu_ccs'] = mu_ccs * 1e-4
        mobility[key]['mu_tot'] = mu_tot * 1e-4 * Field_coeff
    return mobility
def nPDF(self, x):
    p = np.zeros(x.size)
    for i, xx in enumerate(x):
        gil_pelaez = lambda t: mp.re(self.char_fn(t) * mp.exp(-1j * t * xx))
        cutoff = self.find_cutoff(1e-30)
        # Instead of finding roots, break up quadrature into degrees proportional
        # to the expected number of oscillations of e^(i xx t) within t = [0, cutoff]
        nosc = cutoff / (1 / max(10, np.abs(xx - self.mean())))
        # roots = self.find_roots(gil_pelaez, cutoff)
        # if np.abs(xx - self.mean()) < 3 * np.sqrt(self.variance()):
        # (np.linspace needs an integer count, hence int(nosc))
        I = mp.quad(gil_pelaez, np.linspace(0, cutoff, int(nosc)), maxdegree=10)
        # I = mp.quadosc(gil_pelaez, (0, cutoff), zeros=roots)
        # else:
        #     # For now, do not trust any results out greater than 3 sigma
        #     I = 0
        # if np.abs(xx - self.mean()) >= 2 * np.sqrt(self.variance()):
        #     I = self.asymptotic_expansion(xx)
        p[i] = 1 / np.pi * float(I)
        print(i)
    return p
def convert_rh_to_q(rh, temp_abs, pressure):
    # Saturation vapour pressure (Stull).
    esat = 611.2 * mpmath.exp(17.67 * (temp_abs - 273.16) / (temp_abs - 29.66))
    q_abs = (ratio_rmm / (R_dry * temp_abs)) * rh / 100 * esat
    r = R_dry / (1 - q_abs * temp_abs / pressure * (R_vapour - R_dry))
    specific_humidity = q_abs * r * temp_abs / pressure
    return specific_humidity
def q_eit_standing_wave(Delta, Deltac, Omega, g1d, periodLength, phaseShift):
    if Delta == Deltac:
        return 0
    gprime = 1 - g1d
    kd = pi / periodLength
    Mcell = eye(2)
    for i in range(periodLength):
        OmegaAtThisSite = Omega * cos(kd * i + pi * phaseShift)
        beta3 = (g1d * (Delta - Deltac)) / (
            (-2.0j * Delta + gprime) * (Delta - Deltac)
            + 2.0j * OmegaAtThisSite**2)
        M3 = matrix([[1 - beta3, -beta3],
                     [beta3, 1 + beta3]])
        Mf = matrix([[exp(1j * kd), 0],
                     [0, exp(-1j * kd)]])
        Mcell = Mf * M3 * Mcell
    ret = (1.0 / periodLength) * acos(-0.5 * (Mcell[0, 0] + Mcell[1, 1]))
    return ret
def fx_mmse(s, r):
    x = np.zeros_like(s)
    x_var = np.zeros_like(r)
    px = 0.5
    for i in range(2 * NUM_ANT):
        sum_n1 = 0
        sum_n2 = 0
        sum_norm = 0
        s_i = float(s[i, 0])
        r_i = float(r[i, 0])
        for x_cand in [-1 / mpmath.sqrt(2), 1 / mpmath.sqrt(2)]:
            tmp = mpmath.exp(-0.5 * (x_cand - r_i)**2 / s_i)
            pr_xcand = tmp / mpmath.sqrt(2 * mpmath.pi * s_i)
            norm = px * pr_xcand
            n1 = x_cand * norm
            n2 = 0.5 * norm
            sum_norm += norm
            sum_n1 += n1
            sum_n2 += n2
        x_i = float(sum_n1 / sum_norm)
        x_var_i = float(sum_n2 / sum_norm - x_i**2)
        x[i, 0] = x_i
        x_var[i, 0] = x_var_i
    return x, x_var
def logistic_gaussian(m, v):
    if m == oo:
        if v == oo:
            return oo
        return Float('1.0')
    if v == oo:
        return Float('0.5')
    mpmath.mp.dps = 500
    mmpf = m._to_mpmath(500)
    vmpf = v._to_mpmath(500)
    # The integration routine below is obtained by substituting x = atanh(t)
    # into the definition of logistic_gaussian
    #
    # f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / (1 + mpmath.exp(-x))
    # result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
    #
    # Such a substitution makes the mpmath.quad call much faster.
    tanhm = mpmath.tanh(mmpf)
    # Not really a precise threshold, but fine for our data
    if tanhm == mpmath.mpf('1.0'):
        return Float('1.0')
    f = lambda t: mpmath.exp(-(mpmath.atanh(t) - mmpf) ** 2 / (2 * vmpf)) / (
        (1 - t) * (1 + t + mpmath.sqrt(1 - t * t)))
    coef = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf)
    # int_ avoids shadowing the builtin int
    int_, err = mpmath.quad(f, [-1, 1], error=True)
    result = coef * int_
    if mpmath.mpf('1e50') * abs(err) > abs(int_):
        print(f"Suspiciously big error when evaluating an integral for logistic_gaussian({m}, {v}).")
        print(f"Integral: {int_}")
        print(f"Integral error estimate: {err}")
        print(f"Coefficient: {coef}")
        print(f"Result (Coefficient * Integral): {result}")
    return Float(result)
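# Sketch verifying the atanh substitution used above, at modest precision:
# the direct integral and the substituted one should agree.
import mpmath
mmpf, vmpf = mpmath.mpf('0.3'), mpmath.mpf('1.5')
direct = mpmath.quad(
    lambda u: mpmath.exp(-(u - mmpf)**2 / (2 * vmpf)) / (1 + mpmath.exp(-u)),
    [-mpmath.inf, mpmath.inf])
substituted = mpmath.quad(
    lambda t: mpmath.exp(-(mpmath.atanh(t) - mmpf)**2 / (2 * vmpf))
    / ((1 - t) * (1 + t + mpmath.sqrt(1 - t * t))),
    [-1, 1])
assert mpmath.almosteq(direct, substituted)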
def gauss_warp_arb(X, l1, l2, lw, x0):
    r"""Warps the `X` coordinate with a Gaussian-shaped divot.

    .. math::

        l = l_1 - (l_1 - l_2) \exp\left ( -4\ln 2\frac{(X-x_0)^2}{l_{w}^{2}} \right )

    Parameters
    ----------
    X : :py:class:`Array`, (`M`,) or scalar float
        `M` locations to evaluate length scale at.
    l1 : positive float
        Global value of the length scale.
    l2 : positive float
        Pedestal value of the length scale.
    lw : positive float
        Width of the dip.
    x0 : float
        Location of the center of the dip in length scale.

    Returns
    -------
    l : :py:class:`Array`, (`M`,) or scalar float
        The value of the length scale at the specified point.
    """
    if isinstance(X, scipy.ndarray):
        if isinstance(X, scipy.matrix):
            X = scipy.asarray(X, dtype=float)
        return l1 - (l1 - l2) * scipy.exp(-4.0 * scipy.log(2.0) * (X - x0)**2.0 / (lw**2.0))
    else:
        return l1 - (l1 - l2) * mpmath.exp(-4.0 * mpmath.log(2.0) * (X - x0)**2.0 / (lw**2.0))
def normal_cdf_moment_ratio(n, x):
    mpmath.mp.dps = 500
    xmpf = x._to_mpmath(500)
    nmpf = n._to_mpmath(500)
    if x < 0:
        return Float(mpmath.power(2, -0.5 - nmpf / 2)
                     * mpmath.hyperu(nmpf / 2 + 0.5, 0.5, xmpf * xmpf / 2))
    return Float(mpmath.exp(xmpf * xmpf / 4) * mpmath.pcfu(0.5 + nmpf, -xmpf))
def ila_integrand_lp1_b(self, eta, rloc):
    k, p, l = self.k, self.p, self.l
    kz = mpmath.sqrt(k**2 - eta**2)
    res = mpmath.power(eta, np.abs(l) + 1) / mpmath.sqrt(kz) \
        * mpmath.laguerre(p, l, self.a(eta) ** 2) * mpmath.exp(-self.a(eta) ** 2 / 2) \
        * (1 + kz / k) * mpmath.besselj(l, eta * rloc / k)
    return res
def test_tklmbda_zero_shape(self):
    # When lmbda = 0 the CDF has a simple closed form
    one = mpmath.mpf(1)
    assert_mpmath_equal(
        lambda x: sp.tklmbda(x, 0),
        lambda x: one / (mpmath.exp(-x) + one),
        [Arg()], rtol=1e-7)
def test_weighted_logsumexp():
    x = [1.0, 0.5, -1.0, -2.0]
    w = [3.5, 0.0, 1.0, 3.0]
    y = logsumexp(x, weights=w)
    wsum = mpmath.fsum([wi * mpmath.exp(xi) for xi, wi in zip(x, w)])
    expected = mpmath.log(wsum)
    assert mpmath.almosteq(y, expected)
def pdf(x, p, b, loc=0, scale=1):
    """
    Probability density function of the generalized inverse Gaussian
    distribution.

    The PDF for x > loc is:

        z**(p - 1) * exp(-b*(z + 1/z)/2)
        --------------------------------
                2 * s * K_p(b)

    where s is the scale, z = (x - loc)/s, and K_p(b) is the modified Bessel
    function of the second kind.  For x <= loc, the PDF is zero.
    """
    x = mpmath.mpf(x)
    p = mpmath.mpf(p)
    b = mpmath.mpf(b)
    loc = mpmath.mpf(loc)
    scale = mpmath.mpf(scale)
    if x <= loc:
        return mpmath.mp.zero
    z = (x - loc) / scale
    return (mpmath.power(z, p - 1) * mpmath.exp(-b * (z + 1 / z) / 2)
            / (2 * mpmath.besselk(p, b))
            / scale)
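# Normalization sketch: the density above should integrate to 1 over
# (loc, inf) for any valid parameters (assumes pdf and mpmath in scope).
mpmath.mp.dps = 30
total = mpmath.quad(lambda t: pdf(t, p=1.5, b=2.0), [0, mpmath.inf])
assert mpmath.almosteq(total, 1)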
def __call__(self, Xi, Xj, sigmaf, l1, l2, lw, x0):
    """Evaluate the covariance function between points `Xi` and `Xj`.

    Parameters
    ----------
    Xi, Xj : :py:class:`Array`, :py:class:`mpf` or scalar float
        Points to evaluate covariance between. If they are :py:class:`Array`,
        :py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
        functions are used.
    sigmaf : scalar float
        Prefactor on covariance.
    l1, l2, lw, x0 : scalar floats
        Parameters of length scale warping function, passed to
        :py:attr:`warp_function`.

    Returns
    -------
    k : :py:class:`Array` or :py:class:`mpf`
        Covariance between the given points.
    """
    li = self.warp_function(Xi, l1, l2, lw, x0)
    lj = self.warp_function(Xj, l1, l2, lw, x0)
    if isinstance(Xi, scipy.ndarray):
        if isinstance(Xi, scipy.matrix):
            Xi = scipy.asarray(Xi, dtype=float)
            Xj = scipy.asarray(Xj, dtype=float)
        return sigmaf**2.0 * (
            scipy.sqrt(2.0 * li * lj / (li**2.0 + lj**2.0))
            * scipy.exp(-(Xi - Xj)**2.0 / (li**2 + lj**2)))
    else:
        return sigmaf**2.0 * (
            mpmath.sqrt(2.0 * li * lj / (li**2.0 + lj**2.0))
            * mpmath.exp(-(Xi - Xj)**2.0 / (li**2 + lj**2)))
def zp(x):
    """Plasma dispersion function using the complementary error function
    from the mpmath library."""
    return -mp.sqrt(mp.pi) * mp.exp(-x**2) * mp.erfi(x) \
        + mpc(0, 1) * mp.sqrt(mp.pi) * mp.exp(-x**2)
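# Quick check (assumes the module's mp/mpc imports): at x = 0, erfi(0) = 0,
# so the plasma dispersion function gives Z(0) = i * sqrt(pi).
import mpmath
assert mpmath.almosteq(zp(0), mpmath.mpc(0, 1) * mpmath.sqrt(mpmath.pi))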
def skewness(mu=0, sigma=1):
    """
    Skewness of the lognormal distribution.
    """
    _validate_sigma(sigma)
    sigma2 = sigma**2
    return (mpmath.exp(sigma2) + 2) * mpmath.sqrt(mpmath.expm1(sigma2))
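# The closed form above is (e^(sigma^2) + 2) * sqrt(e^(sigma^2) - 1). A sketch
# cross-checking it against the skewness computed from the raw lognormal
# moments E[X^k] = exp(k*mu + k^2*sigma^2/2):
import mpmath

def _lognormal_moment(k, mu, sigma):
    return mpmath.exp(k * mu + k**2 * sigma**2 / 2)

mu, sigma = mpmath.mpf(0), mpmath.mpf('0.7')
m1, m2, m3 = (_lognormal_moment(k, mu, sigma) for k in (1, 2, 3))
var = m2 - m1**2
skew_from_moments = (m3 - 3 * m1 * var - m1**3) / var**mpmath.mpf('1.5')
expected = (mpmath.exp(sigma**2) + 2) * mpmath.sqrt(mpmath.expm1(sigma**2))
assert mpmath.almosteq(skew_from_moments, expected)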
def BSLaplace(S, K, T, t, r, sig, N, phi):
    """Solving the Black-Scholes PDE in the Laplace domain"""
    x = ln(S / K)
    r = mpf(r); sig = mpf(sig); T = mpf(T); t = mpf(t)
    S = mpf(S); K = mpf(K); x = mpf(x)
    mu = r - 0.5 * (sig**2)
    tau = T - t
    c1 = mpf('0.5017')
    c2 = mpf('0.6407')
    c3 = mpf('0.6122')
    c4 = mpc('0', '0.2645')
    ans = 0.0
    h = 2 * pi / N
    h = mpf(h)
    for k in range(N // 2):  # use symmetry (integer division for range)
        theta = -pi + (k + 0.5) * h
        z = N / tau * (c1 * theta / tan(c2 * theta) - c3 + c4 * theta)
        dz = N / tau * (-c1 * c2 * theta / (sin(c2 * theta)**2)
                        + c1 / tan(c2 * theta) + c4)
        eps1 = (-mu + sqrt(mu**2 + 2 * (sig**2) * (z + r))) / (sig**2)
        eps2 = (-mu - sqrt(mu**2 + 2 * (sig**2) * (z + r))) / (sig**2)
        b1 = 1 / (eps1 - eps2) * (eps2 / (z + r) + (1 - eps2) / z)
        b2 = 1 / (eps1 - eps2) * (eps1 / (z + r) + (1 - eps1) / z)
        ans += exp(z * tau) * bs(x, b1, b2, eps1, eps2, z, r, phi) * dz
    val = (K * (h / (2j * pi) * ans)).real
    return 2 * val
def Asian(S, K, T, t, sig, r, N):
    # Assign multi-precision values
    S = mpf(S)
    K = mpf(K)
    sig = mpf(sig)
    T = mpf(T)
    t = mpf(t)
    r = mpf(r)

    # Geman and Yor's variables
    tau = mpf(((sig**2) / 4) * (T - t))
    v = mpf(2 * r / (sig**2) - 1)
    alp = mpf(sig**2 / (4 * S) * K * T)
    beta = mpf(-1 / (2 * alp))
    tau = mpf(tau)
    N = mpf(N)

    # Initiate the step size
    h = 2 * pi / N
    mp.dps = 100
    c1 = mpf('0.5017')
    c2 = mpf('0.6407')
    c3 = mpf('0.6122')
    c4 = mpc('0', '0.2645')

    # The loop evaluates the Laplace inversion at each point theta,
    # based on the trapezoidal rule.
    ans = 0.0
    for k in range(int(N) // 2):  # N/2 : symmetry (range needs an integer)
        theta = -pi + (k + 0.5) * h
        z = 2 * v + 2 + N / tau * (c1 * theta / tan(c2 * theta) - c3 + c4 * theta)
        dz = N / tau * (-c1 * c2 * theta / sin(c2 * theta)**2
                        + c1 / tan(c2 * theta) + c4)
        zz = N / tau * (c1 * theta / tan(c2 * theta) - c3 + c4 * theta)
        mu = sqrt(v**2 + 2 * z)
        a = mu / 2 - v / 2 - 1
        b = mu / 2 + v / 2 + 2
        G = (2 * alp)**(-a) * gamma(b) / gamma(mu + 1) \
            * hyp1f1(a, mu + 1, beta) / (z * (z - 2 * (1 + v)))
        ans += exp(zz * tau) * G * dz
    return 2 * exp(tau * (2 * v + 2)) * exp(-r * (T - t)) \
        * 4 * S / (T * sig**2) * h / (2j * pi) * ans
def rate_cov_random_switching_v2(k_matrix, alpha, rate_th, lamda_user, q_on, bw):
    k_mat = k_matrix.copy()
    num_tiers = k_mat.shape[0]
    # density = k_mat[:,2]
    # density[-1] = q_on
    # the last row always corresponds to small cells
    density = np.array(k_mat[:, 2] * ([1] * (num_tiers - 1) + [q_on]))
    # print(density)
    power = gb.db2pow(k_mat[:, 1])
    small_cell_idx = num_tiers - 1  # index of the small cell
    # density[small_cell_idx] = q_on

    # initialize the integration result matrix
    tier_integ_result = np.zeros(num_tiers, float)  # integration results of each tier
    area_tiers = np.zeros(num_tiers, float)         # area of the tiers
    threshold_tier = np.zeros(num_tiers, float)     # threshold of the tiers
    N_k = np.zeros(num_tiers, float)                # number of users in each tier
    first_exp_term = np.zeros(num_tiers, float)     # first exponential term in integral
    second_exp_term = np.zeros(num_tiers, float)    # second exponential term in integral
    third_exp_term = np.zeros(num_tiers, float)     # third exponential term in integral

    for i in range(num_tiers):
        area_tiers[i] = A_k(density, power, alpha, i)
        # N_k[i] = 0 if density[i]==0 else 1.28*lamda_user*area_tiers[i]/density[i]
        N_k[i] = 0 if density[i] == 0 else 1 + 1.28 * lamda_user * area_tiers[i] / density[i]
        threshold_tier[i] = mp.inf if (bw == 0) else 2**(rate_th * N_k[i] / bw) - 1
        first_exp_term[i] = threshold_tier[i] * 1 / power[i]
        third_exp_term[i] = mp.pi * sum(density * (power / power[i])**(2 / alpha))
        Z_term = 0 if (threshold_tier[i] == 0) else threshold_tier[i]**(2 / alpha) \
            * mp.quad(lambda u: 1 / (1 + u**(alpha / 2)),
                      [(threshold_tier[i])**(-2 / alpha), mp.inf])
        second_exp_term[i] = third_exp_term[i] * Z_term

    for k in range(num_tiers):
        tier_integ_result[k] = mp.quad(
            lambda y: y * mp.exp(-first_exp_term[k] * y**alpha)
            * mp.exp(-second_exp_term[k] * y**2)
            * mp.exp(-third_exp_term[k] * y**2), [0, mp.inf])

    rate_cov_prob = 2 * mp.pi * sum(density * tier_integ_result)
    return rate_cov_prob
def compute_MI_origemcee(seq_matQ, seq_matR, batches, ematQ, ematR, gamma, R_0):
    # preliminaries
    n_seqs = len(batches)
    n_batches = int(batches.max()) + 1  # assumes zero-indexed batches
    n_bins = 1000
    # energies = sp.zeros(n_seqs)
    f = sp.zeros((n_batches, n_seqs))

    # compute energies
    # for i in range(n_seqs):
    #     energies[i] = sp.sum(seqs[:,:,i]*emat)
    # alternate way
    energies = np.zeros(n_seqs)
    for i in range(n_seqs):
        RNAP = (seq_matQ[:, :, i] * ematQ).sum()
        TF = (seq_matR[:, :, i] * ematR).sum() + R_0
        energies[i] = -RNAP + mp.log(1 + mp.exp(-TF - gamma)) - mp.log(1 + mp.exp(-TF))

    # sort energies
    inds = sp.argsort(energies)
    for i, ind in enumerate(inds):
        f[batches[ind], i] = 1.0 / n_seqs  # batches aren't zero indexed

    # bin and convolve with Gaussian
    f_binned = sp.zeros((n_batches, n_bins))
    for i in range(n_batches):
        f_binned[i, :] = sp.histogram(f[i, :].nonzero()[0], bins=n_bins,
                                      range=(0, n_seqs))[0]
    # f_binned = f_binned/f_binned.sum()
    f_reg = sp.ndimage.gaussian_filter1d(f_binned, 0.04 * n_bins, axis=1)
    f_reg = f_reg / f_reg.sum()

    # compute marginal probabilities
    p_b = sp.sum(f_reg, axis=1)
    p_s = sp.sum(f_reg, axis=0)

    # finally sum to compute the MI
    MI = 0
    for i in range(n_batches):
        for j in range(n_bins):
            if f_reg[i, j] != 0:
                MI = MI + f_reg[i, j] * sp.log2(f_reg[i, j] / (p_b[i] * p_s[j]))
    print MI
    return MI, f_reg
def __init__(self, number_of_layers, vector_d, vector_W, mass_vector,
             exp_index, toll):
    '''Initialization of the input data.'''
    # Number of levels with constant effective mass in the heterostructure
    self.number_of_layers = number_of_layers
    # Vector of dimension n with the width of each layer
    self.structure_width_vector = vector_d
    # Vector of dimension n with the potential of each level
    self.barriers_high_vector = vector_W
    # Vector of dimension n with the effective mass of each layer
    self.mass_vector = mass_vector
    self.toll = toll
    self.exp_index_list = exp_index
    self.roots = []

    # Split the exponential barrier into steps
    new_D = np.zeros(self.toll * len(self.exp_index_list)
                     + (self.number_of_layers - len(self.exp_index_list)))
    new_W = np.zeros(self.toll * len(self.exp_index_list)
                     + (self.number_of_layers - len(self.exp_index_list)))
    k = 0
    if not self.exp_index_list == []:
        for i in range(self.number_of_layers):
            if i not in self.exp_index_list:
                new_D[i + k] = self.structure_width_vector[i]
                new_W[i + k] = self.barriers_high_vector[i]
            else:
                for n in range(self.toll):
                    new_D[i + k + n] = self.structure_width_vector[i] / self.toll
                    new_W[i + k + n] = self.barriers_high_vector[i] * (
                        1 - mp.exp(-(10 * n / self.toll)))
                k = k + (self.toll - 1)
    else:
        new_D = self.structure_width_vector
        new_W = self.barriers_high_vector
    self.structure_width_vector = new_D
    self.barriers_high_vector = new_W

    self.x_max = 0
    for ind in range(len(self.structure_width_vector)):
        if not ind == 0:
            self.x_max = self.x_max + self.structure_width_vector[ind]

    self.a = np.zeros(len(self.structure_width_vector) + 1)
    for num in range(len(self.a)):
        if num == 0:
            self.a[num] = self.structure_width_vector[num]
        elif num == 1:
            self.a[num] = 0
        else:
            self.a[num] = self.structure_width_vector[num - 1]
            self.a[num] = self.a[num] + self.a[num - 1]
def mle(x, loc=None, scale=None):
    """
    Maximum likelihood estimates for the Gumbel distribution.

    `x` must be a sequence of numbers--it is the data to which the
    Gumbel distribution is to be fit.

    If either `loc` or `scale` is not None, the parameter is fixed
    at the given value, and only the other parameter will be fit.

    Returns maximum likelihood estimates of the `loc` and `scale`
    parameters.

    Examples
    --------
    Imports and mpmath configuration:

    >>> import mpmath
    >>> mpmath.mp.dps = 20
    >>> from mpsci.distributions import gumbel_min

    The data to be fit:

    >>> x = [6.86, 14.8 , 15.65, 8.72, 8.11, 8.15, 13.01, 13.36]

    Unconstrained MLE:

    >>> gumbel_min.mle(x)
    (mpf('12.708439639698245696235'), mpf('2.878444823276260896075'))

    If we know the scale is 2, we can add the argument `scale=2`:

    >>> gumbel_min.mle(x, scale=2)
    (mpf('13.18226169025112165358'), mpf('2.0'))
    """
    with mpmath.extradps(5):
        x = [mpmath.mpf(xi) for xi in x]
        if scale is None and loc is not None:
            # Estimate scale with fixed loc.
            loc = mpmath.mpf(loc)
            # Initial guess for findroot.
            s0 = stats.std([xi - loc for xi in x])
            scale = mpmath.findroot(
                lambda t: _mle_scale_with_fixed_loc(t, x, loc), s0)
            return loc, scale
        if scale is None:
            scale = _solve_mle_scale(x)
        else:
            scale = mpmath.mpf(scale)
        if loc is None:
            ex = [mpmath.exp(xi / scale) for xi in x]
            loc = scale * mpmath.log(stats.mean(ex))
        else:
            loc = mpmath.mpf(loc)
        return loc, scale
def test_pow_E(self):
    # E ^ x
    expr = Expression("Power", Symbol("E"), Symbol("x"))
    args = [CompileArg("System`x", real_type)]
    cfunc = _compile(expr, args)
    for _ in range(1000):
        x = random.random()
        self.assertAlmostEqual(cfunc(x), mpmath.exp(x))
def fmt2_erfc(t, m, low=0, factor=mpmath.mpf(1)):
    tt = mpmath.sqrt(t)
    low = mpmath.mpf(low)
    low2 = low * low
    f = factor * mpmath.sqrt(mpmath.pi) / 2. / tt \
        * (mpmath.erf(tt) - mpmath.erf(low * tt))
    e = mpmath.exp(-t)
    e1 = mpmath.exp(-t * low2) * low
    e *= factor
    e1 *= factor
    b = mpmath.mpf('.5') / t
    out = [f]
    for i in range(m):
        f = b * ((2 * i + 1) * f - e + e1)
        e1 *= low2
        out.append(f)
    return np.array(out)
def get_prob_poisson(events, length, rate):
    """
    P(k, lambda = t * rate) = exp(-lambda) * lambda**k / k!,
    evaluated in log space to avoid overflow.
    """
    avg_events = mpmath.fmul(rate, length)  # lambda
    prob = mpmath.fmul((-1), avg_events)
    for i in range(1, events + 1):
        prob = mpmath.fadd(prob, mpmath.log(mpmath.fdiv(avg_events, i)))
    prob = mpmath.exp(prob)
    return prob
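# Cross-check sketch against the direct Poisson pmf
# exp(-lambda) * lambda**k / k! (safe here because the numbers are small):
import mpmath
k, t, rate = 3, 2.0, 1.5
lam = mpmath.mpf(rate) * t
direct = mpmath.exp(-lam) * lam**k / mpmath.factorial(k)
assert mpmath.almosteq(get_prob_poisson(k, t, rate), direct)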
def target_evaluation_func(self, current_clustering, context=None):
    # print(current_labeling)
    energy = self.calculate_energy(current_clustering)
    temperature = 1000
    # print(energy)
    if context is not None:
        temperature = self.cooling_schedule(context.iteration_counter)
    return mpmath.exp(-(energy / temperature))