def logConfidence(x, R, clip=0):
    """
    Estimate the probability that x is NOT a random observation from a
    lognormal distribution described by a sample of random values.

    @param x: observed value
    @type x: float
    @param R: sample of random values
    @type R: [float]
    @param clip: clip zeros at this value; 0 -> don't clip (default: 0)
    @type clip: float
    @return: confidence that x is not random, median of random distr.
    @rtype: (float, float)
    """
    if clip:
        if 0 in R:
            R = N.clip(R, clip, max(R))
        if x == 0:
            x = clip

    ## drop remaining zeros instead of clipping them
    R = N.compress(R, R)

    if x == 0:
        return 0, 0

    ## mean and (n-1)-normalized stdev of the log-transformed sample
    logR = N.log(R)
    alpha = N.average(logR)
    beta = N.sqrt(N.sum(N.power(logR - alpha, 2)) / (len(R) - 1.))

    return logArea(x, alpha, beta), logMedian(alpha)
def FugaP(self, Z, A, B):
    """Fugacity Coefficient of Pure Substances (Peng-Robinson form).

    @param Z: compressibility factor
    @param A: dimensionless attraction parameter
    @param B: dimensionless co-volume parameter
    @return: fugacity coefficient phi = exp(ln phi)
    """
    sq2 = sqrt(2)
    ## PR-characteristic logarithmic term
    term = log((Z + B * (1 + sq2)) / (Z + B * (1 - sq2))) / (2 * sq2)
    return exp(Z - 1 - log(Z - B) - (A / B) * term)
def logConfidence( x, R, clip=0 ):
    """
    Estimate the probability of x NOT being a random observation from a
    lognormal distribution that is described by a set of random values.

    @param x: observed value
    @type x: float
    @param R: sample of random values
    @type R: [float]
    @param clip: clip zeros at this value 0->don't clip (default: 0)
    @type clip: float
    @return: confidence that x is not random, median of random distr.
    @rtype: (float, float)
    """
    if clip and 0 in R:
        R = N.clip( R, clip, max( R ) )
    if clip and x == 0:
        x = clip

    ## compress keeps only entries where R != 0 (drop zeros, don't clip)
    R = N.compress( R, R )
    if x == 0:
        return 0, 0

    ## statistics of the log-transformed sample
    n = len( R )
    log_sample = N.log( R )
    alpha = N.average( log_sample )
    beta = N.sqrt( N.sum( N.power( log_sample - alpha, 2 ) ) / (n - 1.) )

    return logArea( x, alpha, beta ), logMedian( alpha )
def FugaM(self, Z, A_i, B_i, A, B):
    """Fugacity coefficient of component i in a mixture (Peng-Robinson form).

    @param Z: mixture compressibility factor
    @param A_i: component attraction parameter
    @param B_i: component co-volume parameter
    @param A: mixture attraction parameter
    @param B: mixture co-volume parameter
    @return: fugacity coefficient of component i
    """
    sq2 = sqrt(2)
    term = log((Z + B * (1 + sq2)) / (Z + B * (1 - sq2))) / (2 * sq2)
    lnphi = (B_i / B) * (Z - 1) - log(Z - B) \
            + (A / B) * (B_i / B - 2 * A_i / A) * term
    return exp(lnphi)
def dS(self, Z, dadT, b, B, R, T):
    """Residual entropy (Z, dadT, b, B, R, T) -> dS for the generic cubic EOS.

    Uses the EOS constants u, w stored on the instance.
    """
    u, w = self.u, self.w
    d = sqrt(u * u - 4 * w)
    ## characteristic logarithmic term of the generic cubic EOS
    ratio = (2 * Z - B * (u - d)) / (2 * Z + B * (u - d))
    return R * log(Z - B) - dadT / (b * d) * log(ratio)
def dA(self, Z, a, b, B, R, T):
    """Residual Helmholtz energy (Z, a, b, B, R, T) -> dA for the generic cubic EOS.

    Uses the EOS constants u, w stored on the instance.
    """
    u, w = self.u, self.w
    d = sqrt(u * u - 4 * w)
    ratio = (2 * Z - B * (u - d)) / (2 * Z + B * (u - d))
    return a / (b * d) * log(ratio) - R * T * log(Z - B)
def FugaP(self, Z, A, B):
    """ Fugacity Coefficient of Pure Substances (Peng-Robinson form)."""
    ## PR characteristic logarithmic term
    hi = Z + B * (1 + sqrt(2))
    lo = Z + B * (1 - sqrt(2))
    L = log(hi / lo) / (2 * sqrt(2))
    LogFug = Z - 1 - log(Z - B) - A / B * L
    return exp(LogFug)
def T(self, P, m):
    """Invert the Harlacher vapour-pressure correlation for temperature.

    ln(P) = HAR_A + HAR_B/T + HAR_C*ln(T) + HAR_D*P/T**2

    Solved per component by Newton-Raphson, starting from the two-constant
    estimate T0 = HAR_B / (ln(P) - HAR_A).

    Bug fixes vs. the previous version: the residual used an undefined
    name `P_i` and a spurious exp(); the Newton update `T = T-fP_i/dPi`
    referenced the undefined `dPi` and overwrote the result list.

    @param P: pressure (display units; divided by self.Factor internally)
    @param m: model dict with HAR_A..HAR_D coefficient arrays
    @return: list of saturation temperatures, one per component
    """
    P = P / self.Factor
    lnP = log(P)
    T = []
    ## initial guesses ignoring the C and D terms
    T_j = array(m["HAR_B"] / (lnP - m["HAR_A"]))
    for j in range(len(m["HAR_A"])):
        T_r = T_j[j]
        for i in range(20):
            ## residual f(T) = ln(P) - Harlacher right-hand side
            fP_i = lnP - (m["HAR_A"][j] + m["HAR_B"][j] / T_r
                          + m["HAR_C"][j] * log(T_r)
                          + m["HAR_D"][j] * P / power(T_r, 2))
            if abs(fP_i) <= 1e-3:
                break
            ## df/dT = B/T^2 - C/T + 2*D*P/T^3
            dfdT = (m["HAR_B"][j] / power(T_r, 2)
                    - m["HAR_C"][j] / T_r
                    + 2 * m["HAR_D"][j] * P / power(T_r, 3))
            T_r = T_r - fP_i / dfdT
        ## always record the final estimate so output stays aligned
        T.append(T_r)
    return T
def shannon_entropy(x, n_bins, _range):
    """Shannon entropy S = -sum(p * ln p) * dx of the density of x over _range."""
    d = density(x, n_bins, range=_range, steps=0)
    delta_x = d[1, 0] - d[0, 0]
    ## clip to avoid log(0), then renormalize to a proper density
    p = clip(d[:, 1], 1.e-10, 1.e10)
    p = p / (Numeric.sum(p) * delta_x)
    S = -delta_x * Numeric.sum(p * Numeric.log(p))
    return S
def shannon_entropy(x, n_bins, _range):
    """Shannon entropy -sum(p * ln p) * dx of the histogram density of x."""
    hist = density(x, n_bins, range=_range, steps=0)
    dx = hist[1, 0] - hist[0, 0]
    ## avoid log(0) by clipping, then normalize to unit area
    prob = clip(hist[:, 1], 1.e-10, 1.e10)
    prob = prob / (Numeric.sum(prob) * dx)
    return -dx * Numeric.sum(prob * Numeric.log(prob))
def P(self, T, m):
    """Harlacher vapour pressure at temperature T by successive substitution.

    ln(P) = HAR_A + HAR_B/T + HAR_C*ln(T) + HAR_D*P/T**2; the implicit
    D term is iterated starting from the three-constant estimate.
    """
    lnT = log(T)
    Tsq = power(T, 2)
    ## starting guesses without the pressure-dependent D term
    P_j = array(exp(m["HAR_A"] + m["HAR_B"] / T + m["HAR_C"] * lnT))
    P = []
    for j in range(len(m["HAR_A"])):
        P_r = P_j[j]
        for _ in range(20):
            P_i = exp(m["HAR_A"][j] + m["HAR_B"][j] / T
                      + m["HAR_C"][j] * lnT
                      + m["HAR_D"][j] * P_r / Tsq)
            if abs(P_i - P_r) <= 1:
                P.append(P_i)
                break
            P_r = P_i
    return array(P) * self.Factor
def logConfidence(x, R, clip=1e-32):
    """
    Estimate the probability of x NOT being a random observation from a
    lognormal distribution described by a set of random values.
    The exact solution to this problem is in L{Biskit.Statistics.lognormal}.

    @param x: observed value
    @type x: float
    @param R: sample of random values; 0 -> don't clip (default: 1e-32)
    @type R: [float]
    @param clip: clip zeros at this value
    @type clip: float
    @return: confidence that x is not random, mean of random distrib.
    @rtype: (float, float)
    """
    if clip and 0 in R:
        R = N.clip(R, clip, max(R))

    ## mean and stdev of the log-transformed random sample
    logR = N.log(R)
    mean = N.average(logR)
    stdv = N.sqrt(N.sum(N.power(logR - mean, 2)) / (len(R) - 1.))

    ## sample a dense lognormal curve representing the random distribution
    stop = max(R) * 50.0
    step = stop / 100000
    start = step / 10.0
    X = [(v, p_lognormal(v, mean, stdv))
         for v in N.arange(start, stop, step)]

    ## analyse the discretized distribution
    d = Density(X)
    return d.findConfidenceInterval(x * 1.0)[0], d.average()
def logConfidence( x, R, clip=1e-32 ):
    """
    Estimate the probability of x NOT being a random observation from a
    lognormal distribution that is described by a set of random values.
    The exact solution to this problem is in L{Biskit.Statistics.lognormal}.

    @param x: observed value
    @type x: float
    @param R: sample of random values; 0 -> don't clip (default: 1e-32)
    @type R: [float]
    @param clip: clip zeros at this value
    @type clip: float
    @return: confidence that x is not random, mean of random distrib.
    @rtype: (float, float)
    """
    if clip and 0 in R:
        R = N.clip( R, clip, max( R ) )

    ## log-transformed sample statistics
    n = len( R )
    mean = N.average( N.log( R ) )
    stdv = N.sqrt( N.sum( N.power( N.log( R ) - mean, 2 ) ) / (n - 1.) )

    ## discretize the fitted lognormal density on a fine grid
    stop = max( R ) * 50.0
    step = stop / 100000
    start = step / 10.0
    grid = N.arange( start, stop, step )
    X = [ (v, p_lognormal( v, mean, stdv )) for v in grid ]

    d = Density( X )
    return d.findConfidenceInterval( x * 1.0 )[0], d.average()
def entropy(self, emmProb, nullProb):
    """
    Calculate entropy for normalized probabilities scaled by aa freq.
    emmProb & nullProb is shape 1,len(alphabet)

    @param emmProb: emmission probabilities
    @type emmProb: array
    @param nullProb: null probabilities
    @type nullProb: array
    @return: entropy value
    @rtype: float
    """
    ## clip zeros so N.log() stays finite
    safe = N.clip(emmProb, 1.e-10, 1.)
    return N.sum(safe * N.log(safe / nullProb))
def logArea(x, alpha, beta):
    """
    Area of the smallest interval of a lognormal distribution that still
    includes x.

    @param x: border value
    @type x: float
    @param alpha: mean of log-transformed distribution
    @type alpha: float
    @param beta: standarddev of log-transformed distribution
    @type beta: float
    @return: probability that x is NOT drawn from the given distribution
    @rtype: float
    """
    ## mode of the lognormal density; mirror x to the right of it
    r_max = N.exp(alpha - beta ** 2)
    if x < r_max:
        x = r_max ** 2 / x

    upper = (N.log(x) - alpha) / beta
    area = erf(upper / N.sqrt(2)) - erf(-(upper + 2 * beta) / N.sqrt(2))
    return 0.5 * area
def entropy(self, emmProb, nullProb):
    """
    Calculate the Kullback-Leibler distance between the observed and the
    background amino acid distribution at a given position. High values
    mean high conservation. Empty (all 0) emmission probabilities yield
    score 0. See also: BMC Bioinformatics. 2006; 7: 385
    emmProb & nullProb is shape 1,len(alphabet)

    @param emmProb: emmission probabilities
    @type emmProb: array
    @param nullProb: null probabilities
    @type nullProb: array
    @return: relative entropy score
    @rtype: float
    """
    ## an all-zero column would make N.log() blow up
    if N.sum(emmProb) == 0.:
        return 0.

    return N.sum(emmProb * N.log(emmProb / nullProb))
def entropy(self, emmProb, nullProb):
    """
    Kullback-Leibler distance between the observed and the background
    amino acid distribution at a given position. High values mean high
    conservation. Empty (all 0) emmission probabilities yield score 0.
    See also: BMC Bioinformatics. 2006; 7: 385
    emmProb & nullProb is shape 1,len(alphabet)

    @param emmProb: emmission probabilities
    @type emmProb: array
    @param nullProb: null probabilities
    @type nullProb: array
    @return: relative entropy score
    @rtype: float
    """
    ## empty column -> zero score; also avoids log(0)
    if N.sum(emmProb) == 0.:
        return 0.
    ratio = emmProb / nullProb
    return N.sum(emmProb * N.log(ratio))
def logArea(x, alpha, beta):
    """
    Area of the smallest interval of a lognormal distribution that still
    includes x.

    @param x: border value
    @type x: float
    @param alpha: mean of log-transformed distribution
    @type alpha: float
    @param beta: standarddev of log-transformed distribution
    @type beta: float
    @return: probability that x is NOT drawn from the given distribution
    @rtype: float
    """
    ## lognormal mode; values left of it are mirrored to the right
    r_max = N.exp(alpha - beta ** 2)
    x = max(x, r_max ** 2 / x)

    sqrt2 = N.sqrt(2)
    upper = (N.log(x) - alpha) / beta
    return 0.5 * (erf(upper / sqrt2) - erf(-(upper + 2 * beta) / sqrt2))
def FugaM(self, Z, A_i, B_i, A, B):
    """Fugacity coefficient of component i in a mixture (RK-type cubic EOS).

    @param Z: mixture compressibility factor
    @param A_i: component attraction parameter
    @param B_i: component co-volume parameter
    @param A: mixture attraction parameter
    @param B: mixture co-volume parameter
    @return: fugacity coefficient of component i
    """
    ln_phi = B_i / B * (Z - 1) - log(Z - B) \
             + A / B * (B_i / B - 2 * A_i / A) * log(1 + B / Z)
    return exp(ln_phi)
def FugaP(self, Z, A, B):
    """ Fugacity Coefficient of Pure Substances (RK-type cubic EOS)."""
    ## ln(phi) correction term
    correction = (A / B) * log(1 + B / Z)
    return exp(Z - 1 - log(Z - B) - correction)
def Thermal(self, model, case):
    """Fill case.Prop with enthalpy, entropy, free-energy and heat-capacity
    values for the flashed liquid/vapour mixture.

    Bug fix: case.Prop["AFree"] previously averaged the internal energies
    U_v/U_l instead of the just-computed Helmholtz energies A_v/A_l.

    @param model: component data dict (EOS constants, Cp coefficients,
                  critical data, formation energies)
    @param case: case object; reads flash results from case.Prop and
                 writes the thermal properties back into it
    """
    ## pull flash results and EOS parameters from the case
    T = case.Prop["T"]
    P = case.Prop["P"]
    a = case.Prop["a"]
    xf = case.Prop["xf"]
    yf = case.Prop["yf"]
    A = case.Prop["A"]
    B = case.Prop["B"]
    Zli = case.Prop["Zli"]
    Zvi = case.Prop["Zvi"]
    V_li = case.Prop["Vli"]
    V_vi = case.Prop["Vvi"]
    x = case.Prop["x"]
    b = model["RK_B"]
    ## ideal-gas heat capacity, enthalpy and entropy
    Cp, H0, S0 = self.Thermo.Calc(model["CP_A"], model["CP_B"],
                                  model["CP_C"], model["CP_D"], T, R)
    dadT = case.Prop["dadT"]
    d2adT2 = case.Prop["d2adT2"]

    # Liquid residual properties
    dA_L = self.dA(Zli, a, b, B, R, T)
    dS_L = self.dS(Zli, dadT, b, B, R, T)
    dH_L = self.dH(Zli, dA_L, dS_L, R, T)
    INTd2PdT2_L = self.INTd2PdT2(Zli, d2adT2, b, B)
    dCv_L = self.dCv(INTd2PdT2_L, R, T)
    dPdT_L = self.dPdT(dadT, b, R, T, V_li)
    dPdV_L = self.dPdV(a, b, R, T, V_li)
    dCp_L = self.dCp(dCv_L, dPdT_L, dPdV_L, T)

    # Vapor residual properties
    dA_V = self.dA(Zvi, a, b, B, R, T)
    dS_V = self.dS(Zvi, dadT, b, B, R, T)
    dH_V = self.dH(Zvi, dA_V, dS_V, R, T)
    INTd2PdT2_V = self.INTd2PdT2(Zvi, d2adT2, b, B)
    dCv_V = self.dCv(INTd2PdT2_V, R, T)
    dPdT_V = self.dPdT(dadT, b, R, T, V_vi)
    dPdV_V = self.dPdV(a, b, R, T, V_vi)
    dCp_V = self.dCp(dCv_V, dPdT_V, dPdV_V, T)

    # Mix: Watson-type correction of the heat of vaporization to T
    HV_i = model["HV"] * power(absolute((T - model["TC"]) /
                                        (model["TB"] - model["TC"])), 0.38)
    model["HV_T"] = HV_i
    Ho_M_v = sum(yf * (H0 - dH_V))
    Ho_M_l = sum(xf * (H0 - dH_L))
    ## ideal mixing entropy: -R * sum(z * ln z)
    So_M_v = sum(yf * (S0 - dS_V)) - R * sum(yf * log(yf))
    So_M_l = sum(xf * (S0 - dS_L)) - R * sum(xf * log(xf))
    H_v = Ho_M_v
    H_l = Ho_M_l - sum(xf * HV_i)
    HV = H_v - H_l
    SV = HV / T
    S_v = So_M_v
    S_l = So_M_l - SV
    # Cp and Cv : Cp-Cv=R, Cv=Cp-R (ideal gas), then residual corrections
    Cv_v = Cp - R
    case.Prop["Cp_v"] = Cp - dCp_V
    case.Prop["Cv_v"] = Cv_v - dCv_V
    # Enthalpy, vapour-fraction weighted
    case.Prop["H"] = H_v * (case.Prop["FracVap"]) + H_l * (1 - case.Prop["FracVap"])
    case.Prop["H_l"] = H_l
    case.Prop["H_v"] = H_v
    case.Prop["HV"] = HV
    # Entropy
    case.Prop["S"] = S_v * (case.Prop["FracVap"]) + S_l * (1 - case.Prop["FracVap"])
    case.Prop["S_l"] = S_l
    case.Prop["S_v"] = S_v
    # Gibbs free energy G = H - T*S
    G_l = H_l - T * S_l
    G_v = H_v - T * S_v
    case.Prop["G"] = G_v * (case.Prop["FracVap"]) + G_l * (1 - case.Prop["FracVap"])
    case.Prop["G_l"] = G_l
    case.Prop["G_v"] = G_v
    # Internal energy U = H - P*V
    U_l = H_l - sum(xf * P * V_li)
    U_v = H_v - sum(yf * P * V_vi)
    case.Prop["U"] = U_v * (case.Prop["FracVap"]) + U_l * (1 - case.Prop["FracVap"])
    case.Prop["U_l"] = U_l
    case.Prop["U_v"] = U_v
    # Helmholtz free energy A = U - T*S
    A_l = U_l - T * S_l
    A_v = U_v - T * S_v
    ## FIX: was averaging U_v/U_l here instead of A_v/A_l
    case.Prop["AFree"] = A_v * (case.Prop["FracVap"]) + A_l * (1 - case.Prop["FracVap"])
    case.Prop["AFree_l"] = A_l
    case.Prop["AFree_v"] = A_v
    # Enthalpy and Gibbs energy of formation
    case.Prop["HF"] = sum(model["DELHF"] * x)
    case.Prop["GF"] = sum(model["DELGF"] * x)
def Entropy(self, a, b, c, d, T):
    """Ideal-gas entropy integral of Cp/T from T=1 to T for
    Cp = a + b*T + c*T**2 + d*T**3."""
    return (a * log(T) + b * (T - 1)
            + c * (pow(T, 2) - 1) / 2
            + d * (pow(T, 3) - 1) / 3)
def Isotermic(self, model, case):
    """Isothermal TP flash with the cubic EOS: iterates vapour/liquid
    compositions to convergence, then evaluates phase volumes and the
    ideal-gas parts of H and S, storing everything in case.Prop.

    @param model: component data dict (EOS constants, Cp coefficients, ...)
    @param case: case object; reads case.Prop["x"/"T"/"P"], writes results
    """
    xm = case.Prop["x"]
    T = case.Prop["T"]
    P = case.Prop["P"]
    Ac = model["RK_A"]
    b_i = model["RK_B"]
    PreVap = self.PV.P(T, model)
    ## print "K_I",PreVap/P
    AlphaT = 1 / sqrt(T)  # RK temperature dependence: a(T) = Ac/sqrt(T)
    case.Prop["MolWt"] = sum(xm * model["MoleWt"])
    a_i = Ac * AlphaT
    ## dimensionless per-component EOS parameters
    A_i = (a_i * P) / pow(R * T, 2)
    B_i = (b_i * P) / (R * T)
    Zl_i = self.EOS.ZL(A_i, B_i)
    Zv_i = self.EOS.ZG(A_i, B_i)
    ## pure-component fugacity coefficients
    CoeFugo_v = self.FugaP(Zv_i, A_i, B_i)
    CoeFugo_l = self.FugaP(Zl_i, A_i, B_i)
    yf = xm
    xf = xm
    k_i = 1
    RFrac = 2  # previous vapour fraction; 2 forces at least one iteration
    #Iteration to calculate the Fractio Vapor
    while k_i <= 10:
        ## mixture parameters from the current phase compositions
        A_vi = MixingRules.MolarK2(yf, A_i, k=0)
        A_li = MixingRules.MolarK2(xf, A_i, k=0)
        B_v = MixingRules.Molar(yf, B_i)
        A_v = MixingRules.MolarK(yf, A_i, k=0)
        B_l = MixingRules.Molar(xf, B_i)
        A_l = MixingRules.MolarK(xf, A_i, k=0)
        Z_v = self.EOS.ZG(A_v, B_v)
        Z_l = self.EOS.ZL(A_l, B_l)
        ## mixture fugacity coefficients for each phase
        CoeFugM_v = self.FugaM(Z_v, A_vi, B_i, A_v, B_v)
        CoeFugM_l = self.FugaM(Z_l, A_li, B_i, A_l, B_l)
        fi = P * CoeFugM_v * yf
        ki = CoeFugM_l / CoeFugM_v  # equilibrium ratios K_i
        FrVap, xf, yf = Flash(ki, xm)
        Z = FrVap * Z_v + (1 - FrVap) * Z_l
        ## print Z
        if (RFrac - FrVap) <= 1e-10:
            break
        RFrac = FrVap
        k_i += 1
    ## molar phase volumes from the converged Z factors
    V_l = Z_l * R * T / P
    V_v = Z_v * R * T / P
    #Thermal Calcs
    ## print model.keys()
    Cp, H0, S0 = self.Thermo.Calc(model["CP_A"], model["CP_B"], model["CP_C"], model["CP_D"], T, R)
    dadT = self.dadT(Ac, T)
    d2adT2 = self.d2adT2(Ac, T)
    dadT_l = MixingRules.MolarK(xf, dadT, k=0)
    d2adT2_l = MixingRules.MolarK(xf, d2adT2, k=0)
    #Liquid residual properties
    a_l = MixingRules.MolarK(xf, a_i, k=0)
    b_l = MixingRules.Molar(xf, b_i)
    dA_l = self.EOS.dA(Z_l, a_l, b_l, B_l, R, T)
    dS_l = self.EOS.dS(Z_l, dadT_l, b_l, B_l, R, T)
    dH_l = self.EOS.dH(Z_l, dA_l, dS_l, R, T)
    INTd2PdT2_l = self.EOS.INTd2PdT2(Z_l, d2adT2_l, b_l, B_l)
    dCv_l = self.EOS.dCv(INTd2PdT2_l, R, T)
    dPdT_l = self.EOS.dPdT(dadT_l, b_l, R, T, V_l)
    dPdV_l = self.EOS.dPdV(a_l, b_l, R, T, V_l)
    dCp_l = self.EOS.dCp(dCv_l, dPdT_l, dPdV_l, T)
    ## print "DAL",dA_l,dS_l
    # Vapor residual properties
    a_v = MixingRules.MolarK(yf, a_i, k=0)
    b_v = MixingRules.Molar(yf, b_i)
    dadT_v = MixingRules.MolarK(yf, dadT, k=0)
    d2adT2_v = MixingRules.MolarK(yf, d2adT2, k=0)
    dA_v = self.EOS.dA(Z_v, a_v, b_v, B_v, R, T)
    dS_v = self.EOS.dS(Z_v, dadT_v, b_v, B_v, R, T)
    dH_v = self.EOS.dH(Z_v, dA_v, dS_v, R, T)
    INTd2PdT2_v = self.EOS.INTd2PdT2(Z_v, d2adT2_v, b_v, B_v)
    dCv_v = self.EOS.dCv(INTd2PdT2_v, R, T)
    dPdT_v = self.EOS.dPdT(dadT_v, b_v, R, T, V_v)
    dPdV_v = self.EOS.dPdV(a_v, b_v, R, T, V_v)
    dCp_v = self.EOS.dCp(dCv_v, dPdT_v, dPdV_v, T)
    #Mix
    HV = sum(yf * model["HV"])
    Ho_M = sum(yf * H0)
    So_M = sum(yf * S0) - R * sum(yf * log(yf))  # includes ideal mixing entropy
    H_l = Ho_M + dH_l
    H_v = Ho_M + dH_v
    ## print "H ",H_l,H_v,HV
    ## print "V ", V_l,V_v
    ## store the flash results on the case
    case.Prop["Z"] = Z
    case.Prop["Zl"] = Z_l
    case.Prop["Zv"] = Z_v
    case.Prop["Ki"] = ki
    case.Prop["FracVap"] = FrVap
    case.Prop["CoefPureLiq"] = CoeFugo_l
    case.Prop["CoefPureVap"] = CoeFugo_v
    case.Prop["CoefMixVLiq"] = CoeFugM_l
    case.Prop["CoefMixVap"] = CoeFugM_v
    case.Prop["xf"] = xf
    case.Prop["yf"] = yf
    case.Prop["H0"] = Ho_M
    case.Prop["S0"] = So_M
def ln(r, alpha, beta):
    """Lognormal probability density at r for log-mean alpha and log-sd beta."""
    expo = -0.5 / beta ** 2 * (N.log(r) - alpha) ** 2
    ## normalization: 1 / (beta * r * sqrt(2*pi)), applied in log space
    norm = 0.5 * N.log(2 * N.pi) + N.log(beta * r)
    return N.exp(expo - norm)
def entropySD(self):
    """Standard deviation (via MU.SD) of the cluster-averaged membership entropy."""
    avg_entropy = N.sum(-self.msm * N.log(self.msm)) / float(self.n_cluster)
    return MU.SD(avg_entropy)
def FugaP(self, Z, A, B):
    """ Fugacity Coefficient of Pure Substances (RK-type cubic EOS)."""
    ln_phi = (Z - 1) - log(Z - B) - (A / B) * log(1 + B / Z)
    return exp(ln_phi)
def Isotermic(self,model,case):
    """Isothermal TP flash with the cubic EOS: iterates vapour/liquid
    compositions to convergence, then evaluates phase volumes and the
    ideal-gas parts of H and S, storing everything in case.Prop.

    @param model: component data dict (EOS constants, Cp coefficients, ...)
    @param case: case object; reads case.Prop["x"/"T"/"P"], writes results
    """
    xm = case.Prop["x"]
    T = case.Prop["T"]
    P = case.Prop["P"]
    Ac = model["RK_A"]
    b_i = model["RK_B"]
    PreVap = self.PV.P(T,model)
    ## print "K_I",PreVap/P
    AlphaT = 1/sqrt( T)  # RK temperature dependence: a(T) = Ac/sqrt(T)
    case.Prop["MolWt"] = sum( xm* model["MoleWt"] )
    a_i = Ac * AlphaT
    ## dimensionless per-component EOS parameters
    A_i = ( a_i * P)/ pow( R * T,2)
    B_i = ( b_i * P )/( R * T)
    Zl_i= self.EOS.ZL(A_i,B_i)
    Zv_i= self.EOS.ZG(A_i,B_i)
    ## pure-component fugacity coefficients
    CoeFugo_v = self.FugaP(Zv_i, A_i, B_i )
    CoeFugo_l = self.FugaP(Zl_i, A_i, B_i )
    yf = xm
    xf = xm
    k_i = 1
    RFrac = 2  # previous vapour fraction; 2 forces at least one iteration
    #Iteration to calculate the Fractio Vapor
    while k_i <=10:
        ## mixture parameters from the current phase compositions
        A_vi = MixingRules.MolarK2( yf, A_i, k=0 )
        A_li = MixingRules.MolarK2( xf, A_i,k=0)
        B_v = MixingRules.Molar( yf, B_i)
        A_v = MixingRules.MolarK( yf,A_i ,k=0 )
        B_l = MixingRules.Molar( xf, B_i)
        A_l = MixingRules.MolarK( xf,A_i,k=0)
        Z_v = self.EOS.ZG(A_v,B_v)
        Z_l = self.EOS.ZL(A_l,B_l)
        ## mixture fugacity coefficients for each phase
        CoeFugM_v = self.FugaM(Z_v, A_vi,B_i,A_v,B_v)
        CoeFugM_l = self.FugaM(Z_l, A_li,B_i,A_l,B_l)
        fi = P*CoeFugM_v*yf
        ki = CoeFugM_l/CoeFugM_v  # equilibrium ratios K_i
        FrVap, xf, yf = Flash(ki, xm)
        Z = FrVap*Z_v + (1- FrVap)* Z_l
        ## print Z
        if (RFrac-FrVap)<= 1e-10:
            break
        RFrac = FrVap
        k_i +=1
    ## molar phase volumes from the converged Z factors
    V_l = Z_l * R * T / P
    V_v = Z_v * R * T / P
    #Thermal Calcs
    ## print model.keys()
    Cp,H0,S0 = self.Thermo.Calc(model["CP_A"],model["CP_B"],model["CP_C"],model["CP_D"],T,R)
    dadT = self.dadT( Ac,T)
    d2adT2 = self.d2adT2( Ac,T)
    dadT_l = MixingRules.MolarK( xf,dadT,k=0)
    d2adT2_l = MixingRules.MolarK( xf,d2adT2 ,k=0)
    #Liquid residual properties
    a_l = MixingRules.MolarK( xf, a_i, k=0 )
    b_l = MixingRules.Molar( xf, b_i)
    dA_l = self.EOS.dA(Z_l,a_l,b_l,B_l,R,T)
    dS_l = self.EOS.dS(Z_l,dadT_l,b_l,B_l,R,T)
    dH_l = self.EOS.dH(Z_l,dA_l,dS_l,R,T)
    INTd2PdT2_l = self.EOS.INTd2PdT2(Z_l,d2adT2_l,b_l,B_l)
    dCv_l= self.EOS.dCv(INTd2PdT2_l,R,T)
    dPdT_l = self.EOS.dPdT(dadT_l,b_l,R,T,V_l)
    dPdV_l = self.EOS.dPdV(a_l,b_l,R,T,V_l)
    dCp_l= self.EOS.dCp(dCv_l,dPdT_l,dPdV_l,T)
    ## print "DAL",dA_l,dS_l
    # Vapor residual properties
    a_v = MixingRules.MolarK( yf, a_i, k=0 )
    b_v = MixingRules.Molar( yf, b_i)
    dadT_v = MixingRules.MolarK( yf,dadT,k=0)
    d2adT2_v = MixingRules.MolarK( yf,d2adT2 ,k=0)
    dA_v = self.EOS.dA(Z_v,a_v,b_v,B_v,R,T)
    dS_v = self.EOS.dS(Z_v,dadT_v,b_v,B_v,R,T)
    dH_v = self.EOS.dH(Z_v,dA_v,dS_v,R,T)
    INTd2PdT2_v = self.EOS.INTd2PdT2(Z_v,d2adT2_v,b_v,B_v)
    dCv_v= self.EOS.dCv(INTd2PdT2_v,R,T)
    dPdT_v = self.EOS.dPdT(dadT_v,b_v,R,T,V_v)
    dPdV_v = self.EOS.dPdV(a_v,b_v,R,T,V_v)
    dCp_v= self.EOS.dCp(dCv_v,dPdT_v,dPdV_v,T)
    #Mix
    HV = sum(yf*model["HV"])
    Ho_M = sum(yf*H0)
    So_M = sum(yf*S0) -R*sum(yf*log(yf))  # includes ideal mixing entropy
    H_l = Ho_M +dH_l
    H_v = Ho_M +dH_v
    ## print "H ",H_l,H_v,HV
    ## print "V ", V_l,V_v
    ## store the flash results on the case
    case.Prop["Z"] = Z
    case.Prop["Zl"] = Z_l
    case.Prop["Zv"] = Z_v
    case.Prop["Ki"] = ki
    case.Prop["FracVap"] = FrVap
    case.Prop["CoefPureLiq"] = CoeFugo_l
    case.Prop["CoefPureVap"] = CoeFugo_v
    case.Prop["CoefMixVLiq"] = CoeFugM_l
    case.Prop["CoefMixVap"] = CoeFugM_v
    case.Prop["xf"] = xf
    case.Prop["yf"] = yf
    case.Prop["H0"] = Ho_M
    case.Prop["S0"] = So_M
def FugaM(self, Z, A_i, B_i, A, B):
    """Fugacity coefficient of component i in a mixture (RK-type cubic EOS)."""
    ## composition-dependent correction factor
    mix = B_i / B - 2 * A_i / A
    ln_phi = (B_i / B) * (Z - 1) - log(Z - B) + (A / B) * mix * log(1 + B / Z)
    return exp(ln_phi)
def clusterEntropy(self):
    """Per-cluster entropy terms: -(1/npoints) * diag(msm . log(msm)^T)."""
    logm = N.log(self.msm)
    centropy = N.diagonal(N.dot(self.msm, N.transpose(logm)))
    return -1 / float(self.npoints) * centropy
def clusterEntropy(self):
    """Cluster entropies from the membership matrix self.msm."""
    ## diag(msm . log(msm)^T) gives sum_j m_ij * log(m_ij) per cluster i
    cross = N.dot(self.msm, N.transpose(N.log(self.msm)))
    return -N.diagonal(cross) / float(self.npoints)
def T(self, P, m):
    """Antoine equation solved for temperature: T = B/(A - ln(P/Factor)) - C."""
    lnP = log(P / self.Factor)
    return array(m["ANT_B"] / (m["ANT_A"] - lnP) - m["ANT_C"])
def Entropy(self, a, b, c, d, T):
    """Ideal-gas entropy integral of (a + b*T + c*T**2 + d*T**3)/T from 1 to T."""
    S0 = a * log(T)
    S0 += b * (T - 1)
    S0 += c * (pow(T, 2) - 1) / 2
    S0 += d * (pow(T, 3) - 1) / 3
    return S0
def INTd2PdT2(self, Z, d2adT2, b, B):
    """d2a/dT2 times the generic-cubic log term, divided by b*sqrt(u^2-4w).

    Uses the EOS constants u, w stored on the instance.
    """
    u, w = self.u, self.w
    d = sqrt(u * u - 4 * w)
    ratio = (2 * Z - B * (u - d)) / (2 * Z + B * (u - d))
    return d2adT2 / (b * d) * log(ratio)
def FugaM(self, Z, A_i, B_i, A, B):
    """Fugacity coefficient of component i in a mixture
    (RK-type EOS, square-root form of the attraction mixing rule)."""
    mix = B_i / B - 2 * sqrt(A_i / A)
    ln_phi = (B_i / B) * (Z - 1) - log(Z - B) + (A / B) * mix * log(1 + B / Z)
    return exp(ln_phi)
def FugaM(self, Z, A_i, B_i, A, B):
    """Fugacity coefficient of component i in a mixture (Peng-Robinson form)."""
    sq2 = sqrt(2)
    L = log((Z + B * (1 + sq2)) / (Z + B * (1 - sq2))) / (2 * sq2)
    return exp(B_i / B * (Z - 1) - log(Z - B)
               + A / B * (B_i / B - 2 * A_i / A) * L)
def estimate_reference_single(entry, stats, bounds, ref=0.0, verbose=False, exclude=None, entry_name=None, atom_type='H', exclude_outliers=False,molType='protein'):
    """Estimate a chemical-shift reference offset for one entry by a
    precision-weighted comparison of per-atom-class shift medians against
    reference statistics. (Python 2 code: print statements, old raise syntax.)

    entry/bounds/atom_type/molType are forwarded to decompose_classes();
    stats maps atom-class keys to (mean, sd, ...); exclude is an optional
    collection of (entry_name, key) pairs to skip; exclude_outliers, when
    not False, is a Z-score cutoff for discarding individual shifts.

    Returns (ref_mu, ref_sd, S/N): weighted offset estimate, its standard
    deviation (both None if no usable data), and a per-shift score term.
    """
    ## accumulators: A/B form the weighted-mean numerator/denominator,
    ## S a log-likelihood-style score, N counts shifts (starts at 1 so
    ## S/N is defined even when no class contributes)
    A = 0.
    B = 0.
    S = 0.
    N = 1
    ## loop through all atom types
    classes = decompose_classes(entry, bounds, atom_type,molType=molType)
    if exclude and not entry_name:
        raise TypeError, 'attribute entry_name needs to be set.'
    n_excluded = 0
    n_total = 0
    for key, shifts in classes.items():
        ## print entry_name, key
        if not key in stats:
            if verbose:
                print key,'no statistics.'
            continue
        if exclude and (entry_name, key) in exclude:
            print entry_name, key, 'excluded from ref estimation.'
            continue
        ## get statistics for current atom type
        mu, sd = stats[key][:2]
        k = 1./sd**2
        if exclude_outliers is not False:
            ## calculate Z scores and exclude shifts with high Z scores from analysis
            Z = abs(shifts-mu)/sd
            mask_include = Numeric.less(Z, exclude_outliers)
            shifts = Numeric.compress(mask_include, shifts)
            n_excluded += len(Z)-Numeric.sum(mask_include)
            n_total += len(Z)
        n = len(shifts)
        if not n:
            continue
        ## weight each class by its precision k and size n
        A += k*n*(median(shifts)-mu)
        B += k*n
        S += -0.5*len(shifts)*Numeric.log(k)+0.5*k*sum((Numeric.array(shifts)-mu-ref)**2)
        N += n
    if B > 0.:
        ref_mu = A/B
        ref_sd = 1./Numeric.sqrt(B)
    else:
        ## no class contributed any data
        ref_mu = None
        ref_sd = None
    ## NOTE(review): this reports only when ALL shifts were excluded
    ## (n_excluded == n_total); possibly n_excluded > 0 was intended — confirm.
    if exclude_outliers is not False and n_excluded == n_total:
        print '%d/%d outliers discarded' % (n_excluded, n_total)
    return ref_mu, ref_sd, S/N