def test_convergenceRate():
    problem = trigonometricSolution()
    h = 0.1
    T = 1.0
    hValues = []
    errorValues = []
    for i in range(7):
        dt = h
        xnodes = ynodes = int(1 / sqrt(dt))
        u_e, u = nld.runSolver(problem, T, dt, [xnodes, ynodes])
        e = u_e.vector().array() - u.vector().array()
        E = np.sqrt(np.sum(e**2) / u.vector().array().size)
        hValues.append(h)
        errorValues.append(E)
        h /= 2
    r = zeros(len(hValues))
    for i in range(1, len(hValues)):
        r[i] = ln(errorValues[i-1] / errorValues[i]) / ln(hValues[i-1] / hValues[i])
        print "h = ", hValues[i-1], " E = ", errorValues[i-1], " r = ", r[i]
    if not nt.assert_almost_equal(1, r[-1], places=1):
        print "test_convergenceRate succeeded!"
def logjointprob(Z, X, A, phi, prior):
    Z = minus1(Z)
    X = minus1(X)
    return sum([ln(prior[Z[0]]), ln(phi[X[0]][Z[0]])] +
               [ln(A[Z[e]][Z[e-1]]) + ln(phi[X[e]][Z[e]]) for e in xrange(1, len(Z))] +
               [ln(A[2][Z[-1]])])
def _get_means_stdevs(cls, x, y):
    x_y_counter_lin = cls._convert_x_y_to_counter(x, y)
    x_y_counter = cls._convert_x_y_to_counter(x, [ln(y_i) for y_i in y])
    # Guard against a zero spread: the original fallback `1 ** -10` is just 1,
    # so a small positive epsilon (1e-10) was presumably intended here.
    st_dev = {x: ln(stdev(y) if stdev(y) > 0 else 1e-10)
              for x, y in x_y_counter_lin.items()}
    mean_ = {x: mean(y) for x, y in x_y_counter.items()}
    return cls._get_mean_stdev_from_counter(x_y_counter, st_dev, mean_)
def AIC(records, SSR, p):
    """ln(SSR(p)/T) + (p+1)*2/T is the formula. The lower this is, the
    better the fit. This is the Akaike Information Criterion. I am using
    it because I don't think that any of the models accurately represents
    reality, so I am finding the one that fits best.
    """
    # math.ln does not exist; math.log is the natural log. Per the docstring,
    # SSR is divided by T = len(records), not by ln(len(records)).
    return math.log(SSR(records, p) / len(records)) + (p + 1) * 2 / len(records)
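# A minimal usage sketch for the AIC function above. The records list and
# the ssr() helper are hypothetical stand-ins (any callable returning the
# sum of squared residuals of an order-p model would do).
import math

def ssr(records, p):
    # hypothetical model fit: pretend higher p fits slightly better
    return sum((r - 1.0) ** 2 for r in records) / (p + 1)

records = [1.0, 1.2, 0.9, 1.1, 1.4]
for p in (1, 2, 3):
    print(p, AIC(records, ssr, p))  # lower AIC = better fit/complexity trade-off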
def main(fname='/home/rerla/Downloads/Galaxy28-[HT100_K562_differential_results.xls'):
    """
    two passes so we can estimate percentiles and
    estimate a kind of fisher's independent combined p value
    """
    dat = open(fname, 'r').readlines()
    dhead = dat[0].strip().split('\t')
    dat = dat[1:]
    dat = [x.split() for x in dat if len(x.split()) > 0]
    conts = []
    treats = []
    for index, row in enumerate(dat):  # header
        cont = row[5]
        treat = row[6]
        conts.append((float(cont), index))
        treats.append((float(treat), index))  # so can recover row
    conts.sort()
    conts.reverse()
    treats.sort()
    treats.reverse()
    treats = [(rank, x[0], x[1]) for rank, x in enumerate(treats)]
    conts = [(rank, x[0], x[1]) for rank, x in enumerate(conts)]
    tdict = dict(zip([x[2] for x in treats], treats))  # decorate so can lookup a data row
    cdict = dict(zip([x[2] for x in conts], conts))  # decorate so can lookup a data row
    res = []
    n = float(len(dat) - 1)
    for dindex in range(len(dat)):  # dindex = record in d
        if dindex % 10000 == 0:
            print dindex
        treati, treat, tindex = tdict.get(dindex, (0, 0, 0))
        conti, cont, cindex = cdict.get(dindex, (0, 0, 0))
        crank = conti / n
        trank = treati / n
        try:
            logfold = math.log(treat / cont, 2)
        except:
            print 'bad logfold treat=%f cont=%f' % (treat, cont)
            logfold = 0
        logA = math.log(treat + cont, 2)
        try:
            logM = math.log(abs(treat - cont), 2)
        except:
            print 'bad logM treat=%f cont=%f' % (treat, cont)
            logM = 0
        try:
            # math.ln does not exist; math.log is the natural log
            fish = -2.0 * (math.log(crank) + math.log(trank))
        except:
            print "bad fisher's combined crank=%f trank=%f" % (crank, trank)
            fish = 0
        row = copy.copy(dat[dindex])
        row += ['%i' % conti, '%i' % treati, '%f' % logfold, '%f' % logA,
                '%f' % logM, '%f' % crank, '%f' % trank, '%f' % fish]
        res.append('\t'.join(row))
    h = copy.copy(dhead)
    h += ['conti', 'treati', 'logfold', 'logA', 'logM', 'crank', 'trank', 'fakefishers']
    # the header must be joined like the data rows, or '\n'.join(res) fails on a list
    res.insert(0, '\t'.join(h))
    outfname = '%s_fixed.xls' % fname
    outf = open(outfname, 'w')
    outf.write('\n'.join(res))
    outf.close()
def computeShannon(fh, header, verbose):
    for line in fh:
        if header:
            header -= 1
            print "%s\tShannon_index" % line.rstrip()
            continue
        #-------------------------
        lineL = line.split()
        expr = [float(i) for i in lineL[1:]]
        if verbose:
            print >>sys.stderr, expr
        expr_sum = sum(expr)
        if verbose:
            print >>sys.stderr, expr_sum
        assert expr_sum != 0
        expr_R = [1.0 * i / expr_sum for i in expr]
        if verbose:
            print >>sys.stderr, expr_R
        expr_Log = []
        for i in expr_R:
            if i != 0:
                expr_Log.append(i * ln(i) / ln(2))
            else:
                expr_Log.append(i)
        if verbose:
            print >>sys.stderr, expr_Log
        shannon = -1 * sum(expr_Log)
        print "%s\t%s" % (line.strip(), str(shannon))
def propagate(self, context):
    l = self.lhs.getValue()
    b = self.base.getValue()
    e = self.exponent.getValue()
    if l:
        if b:
            if e:  # l,b,e
                if l == b**e:
                    pass  # but overconstrained
                else:
                    print("Error! " + self.name + " is overconstrained")
                    return False
            else:  # l,b => e
                # math.ln does not exist; math.log is the natural log
                self.exponent.setValue(math.log(l) / math.log(b))
        else:
            if e:  # l,e => b
                self.base.setValue(l**(1/e))  # TODO allow multiple solutions...
            else:  # l only
                pass
    else:  # not l
        if b:
            if e:  # b,e => l
                self.lhs.setValue(b**e)
            else:  # b only
                pass
        else:  # no l, no b
            pass
    return True
def genColor(n, startpoint=0):
    assert n >= 1
    # This splits the 0 - 1 segment in the pizza way
    h = (2*n - 1) / (2**ceil(ln(n) / ln(2))) - 1
    h = (h + startpoint) % 1
    # We set saturation based on the amount of green, in the range 0.6 to 0.8
    rgb = colorsys.hsv_to_rgb(h, 1, 1)
    rgb = colorsys.hsv_to_rgb(h, 1, (1 - rgb[1]) * 0.2 + 0.6)
    return rgb
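# A quick standalone sketch of how the "pizza" hue split above behaves: each
# new n lands halfway into the largest remaining gap of the hue circle. This
# driver recomputes h directly and is illustrative, not part of the original.
import colorsys
from math import ceil, log as ln

for n in range(1, 6):
    h = (2*n - 1) / (2**ceil(ln(n) / ln(2))) - 1
    print(n, round(h % 1, 3))  # hues: 0.0, 0.5, 0.25, 0.75, 0.125, ...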
def get_mean_stdev_r_squared(self, x, y):
    # remove all x,y's if y is less than or equal to 0
    yy, xx = ExponentialDistributionFunction._remove_non_positive_values(y, x)
    x_, y_mean, y_std = ExponentialDistributionFunction._get_means_stdevs(x, [y_i for y_i in yy])
    # these have to be of the form y = c x + d, therefore we use ln(y) = b x + ln(a)
    # which (in theory) is equivalent to y = a e^(bx)
    mu = get_r_squared(x_, y_mean, self.mean_b, ln(self.mean_a))
    sigma = get_r_squared(x_, y_std, self.stdev_b, ln(self.stdev_a))
    return mu, sigma
def solver(mesh, deg):
    # Physical parameters
    dpdx = Constant(-1)
    mu = Constant(100)
    V = FunctionSpace(mesh, "Lagrange", deg)
    u = TrialFunction(V)
    v = TestFunction(V)

    # Mark boundary subdomains
    class Sides(SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary

    side = Sides()
    mf = FacetFunction("size_t", mesh)
    mf.set_all(2)
    side.mark(mf, 1)
    noslip = DirichletBC(V, Constant(0), mf, 1)

    a = inner(grad(u), grad(v)) * dx
    L = -1.0 / mu * dpdx * v * dx
    u_ = Function(V)
    solve(a == L, u_, bcs=noslip)

    # Compute the flux
    Q = assemble(u_ * dx)

    # Flux from analytical expression
    mu = 100
    dpdx = -1
    F = (A**2 - B**2 + C**2) / (2 * C)
    M = sqrt(F**2 - A**2)
    alpha = 0.5 * ln((F + M) / (F - M))
    beta = 0.5 * ln((F - C + M) / (F - C - M))
    s = 0
    for n in range(1, 100):
        s += (n * exp(-n * (beta + alpha))) / sinh(n * beta - n * alpha)
    Q_analytical = (pi / (8 * mu)) * (-dpdx) * (
        A**4 - B**4 - (4 * C * C * M * M) / (beta - alpha) - 8 * C * C * M * M * s)
    Q_error = abs(Q - Q_analytical)
    print "Flux computed numerically : ", Q
    print "Flux computed using (3-52): ", Q_analytical
    return mesh.hmin(), Q_error
def ext(n):
    n = int(n * ln(n) + n * (ln(ln(n))))
    candidates = list(range(n + 1))
    fin = int(n**0.5)
    # Loop over the candidates, marking out each multiple.
    for i in xrange(2, fin + 1):
        if candidates[i]:
            candidates[2*i::i] = [None] * (n // i - 1)
    # Filter out non-primes and return the list.
    return [i for i in candidates[2:] if i]
def create_from_x_y_coordinates(cls, x, y, distribution_type: type(Distribution) = NormalDistribution):
    xx, yy = LogLinearDistributionFunction._remove_non_positive_values(x, y)
    x_, y_mean, y_std = LogLinearDistributionFunction._get_means_stdevs(
        [ln(x_i) for x_i in xx], [y_i / xx[i] for i, y_i in enumerate(yy)])
    mean_a, mean_b = linear_regression(x_, y_mean)
    stdev_a, stdev_b = linear_regression(x_, y_std)
    return cls(mean_a, mean_b, stdev_a, stdev_b)
def classify(X, prior, mu, sigma, covdiag=True):
    # (the original mixed `np.` and `numpy.` prefixes; unified to `np` here,
    # assuming the usual `import numpy as np`)
    h = [0] * len(X)
    Ls = [np.linalg.cholesky(sigma[:, :, k]) for k in range(len(mu))]
    for i, x_star in enumerate(X):
        maxval = float("-inf")
        selectedClass = -1
        for k, muk in enumerate(mu):
            sigma_k = sigma[:, :, k]
            if covdiag:
                diff = x_star - muk
                y = np.linalg.solve(sigma_k, diff.T)
            else:
                L = Ls[k]
                diff = x_star - muk
                v = np.linalg.solve(L, diff.T)
                y = np.linalg.solve(L.T, v)
            (sign, logdet) = np.linalg.slogdet(sigma_k)
            a = -0.5 * sign * logdet
            b = -0.5 * np.dot(diff, y)
            c = ln(prior[k])
            total = a + b + c
            if total > maxval:
                selectedClass = k
                maxval = total
        h[i] = selectedClass
    return np.array(h)
def mine(n):
    # Upper bound on the nth prime value
    # http://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
    n = int(n * ln(n) + n * (ln(ln(n))))
    primes = [2]

    def is_prime(primes, x):
        for y in primes:
            if x % y == 0:
                return False
        return True

    for x in xrange(3, n, 2):
        if is_prime(primes, x):
            primes.append(x)
    return primes
def setValue(self, value, context):
    base = self.argA.getValue(context)
    exp = self.argB.getValue(context)
    if base:
        if exp:  # a,b
            if value == base**exp:
                pass  # but overconstrained
            else:
                print("Error! " + self.name + " is overconstrained")
        else:  # base => exp
            # math.ln does not exist; math.log is the natural log
            self.argB.setValue(math.log(value) / math.log(base), context)
    else:
        if exp:  # exp => base
            self.argA.setValue(value**(1/exp), context)  # TODO allow multiple solutions...
        else:  # neither => can't do anything
            pass
def read(self, addr):
    debug('debugHIH6130', self.name, "read", addr)
    try:
        data = self.interface.readBlock((self.addr, 0), 4)
        status = (data[0] & 0xc0) >> 6
        humidity = ((((data[0] & 0x3f) << 8) + data[1]) * 100.0) / 16383.0
        temp = (((data[2] & 0xff) << 8) + (data[3] & 0xfc)) / 4
        tempC = (temp / 16384.0) * 165.0 - 40.0
        tempF = tempC * 1.8 + 32
        # https://en.wikipedia.org/wiki/Dew_point
        # 0C <= T <= +50C
        b = 17.368
        c = 238.88
        gamma = ln(humidity / 100) + ((b * tempC) / (c + tempC))
        dewpointC = c * gamma / (b - gamma)
        dewpointF = dewpointC * 1.8 + 32
        debug("debugHIH6130", "humidity:", humidity, "tempC:", tempC, "tempF:", tempF,
              "dewpointC:", dewpointC, "dewpointF:", dewpointF)
        if addr == "humidity":
            return humidity
        elif addr == "temp":
            return tempF
        elif addr == "dewpoint":
            return dewpointF
        else:
            return 0
    except:
        return 0
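# A self-contained sketch of the same Magnus dew-point approximation used in
# read() above, so the formula can be sanity-checked without the sensor; the
# test values are illustrative, not from the original code.
from math import log as ln

def dew_point_c(humidity_pct, temp_c, b=17.368, c=238.88):
    gamma = ln(humidity_pct / 100.0) + (b * temp_c) / (c + temp_c)
    return c * gamma / (b - gamma)

print(round(dew_point_c(50.0, 20.0), 1))  # roughly 9.3 C at 50% RH and 20 C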
def get_log_lbf_from_regression_coefficients(a: float, b: float, max_x: float = 130000) -> tuple:
    x = list()
    y = list()
    for i in float_range(0.1, max_x, 0.5):
        x.append(i)
        y.append(a * ln(i) + b)
    return x, y
def _probabilistic_sum(number_of_dice, sides):
    """
    For inordinately large numbers of dice, we can approximate their sum
    by picking a random point under the bell-curve that represents the
    probabilistic sums.

    We accomplish this by picking a random y value on the curve, then
    picking a random x value from the bounds of that y intercept.
    """
    n = number_of_dice
    s = sides
    u = ((s + 1.) / 2) * n  # mean
    B = (1.7 * (n ** .5) * ((2 * pi) ** .5))
    max_y = 1. / B
    min_y = (e ** ((-(n - u) ** 2) / (2 * 1.7 * 1.7 * n))) / B
    Y = random.uniform(min_y, max_y)
    try:
        T = ln(Y * B) * (2 * (1.7 * 1.7) * n)
    except ValueError:
        # Too close to 0, rounding off
        T = 0
        min_x, max_x = n, n * s
    else:
        min_x, max_x = _quadratic(1, -2 * u, T + u ** 2)
    return int(round(random.uniform(min_x, max_x)))
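# A minimal sanity check of the bell-curve shortcut above against directly
# rolling the dice. _quadratic is not shown in the original, so a stand-in
# returning the two real roots of a*x^2 + b*x + c is assumed here.
import random
from math import pi, e, log as ln, sqrt

def _quadratic(a, b, c):
    d = sqrt(b * b - 4 * a * c)
    return (-b - d) / (2 * a), (-b + d) / (2 * a)

n, s, trials = 1000, 6, 200
approx = sum(_probabilistic_sum(n, s) for _ in range(trials)) / float(trials)
direct = sum(sum(random.randint(1, s) for _ in range(n)) for _ in range(trials)) / float(trials)
print("approx mean ~", approx, "direct mean ~", direct, "theory:", (s + 1) / 2.0 * n)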
def K(feed_size, interest):
    scale_point = float(self.personalization_scale_point)
    how_many = float(self.personalization_scale_how_many)
    desired_ratio = float(1)  # float(overall_interest)
    #print feed_size, 1 - ((ln(desired_ratio * std_size / feed_size) / float(self.interest_ratio_x)))
    return -(ln(desired_ratio * how_many / feed_size) / float(scale_point))
def S(dist):
    """Entropy of a distribution."""
    tot = float(sum(dist))
    normdist = [i / tot for i in dist]
    s = 0
    for p in normdist:
        if p > 0:
            s -= p * ln(p)
    return s
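# Quick check of S() above: a uniform distribution over n outcomes should give
# entropy ln(n), and a degenerate distribution gives 0; values are illustrative.
from math import log as ln

print(S([1, 1, 1, 1]), "vs", ln(4))  # both ~1.386
print(S([4, 0, 0, 0]))               # all mass on one outcome -> 0.0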
def _antider(self, var):
    if self.f == var and self.power.__class__ == Fconst:
        c = Fconst(self.power.c + 1)
        return self.f ** c / c
    elif self.f.__class__ == Fconst and self.power == var:
        # math.ln does not exist; math.log is the natural log
        return self / math.log(self.f.c)
    else:
        raise errors.CantFindAntiDerivativeException()
def calculate_voting_power(error_rate):
    """Given a classifier's error rate (a number), returns the voting
    power (aka alpha, or coefficient) for that classifier."""
    if error_rate == 0:
        return INF
    if error_rate == 1:
        return -INF
    return .5 * ln((1 - error_rate) / error_rate)
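# Worked values for the AdaBoost alpha formula above, assuming the module
# defines INF = float("inf") and ln from math as elsewhere in this corpus;
# the sample error rates are illustrative.
from math import log as ln
INF = float("inf")

for err in (0.1, 0.25, 0.5, 0.75):
    print(err, round(calculate_voting_power(err), 4))
# err = 0.5 gives alpha = 0 (a coin-flip classifier gets no vote);
# err < 0.5 gives positive weight, err > 0.5 negative weight.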
def computeShannon(aList, plus=1):
    #print aList
    if len(aList) < 2:
        print >>sys.stderr, "You may need to specify -I parameter if the program stops."
    # use the plus pseudocount parameter (the original hard-coded +1)
    expr = [float(i) + plus for i in aList]
    expr_sum = sum(expr)
    assert expr_sum != 0
    expr_R = [1.0 * i / expr_sum for i in expr]
    expr_Log = []
    for i in expr_R:
        if i != 0:
            expr_Log.append(i * ln(i) / ln(2))
        else:
            expr_Log.append(i)
    shannon = -1 * sum(expr_Log)
    return shannon
def computeNextPacketTick(self):
    # exponential inter-arrival time via inverse-transform sampling
    randNum = random.uniform(0, 1)
    deltaTime = (-1.0 / float(self.packetPerSec)) * ln(1 - randNum)
    nextGenTick = int(deltaTime * (1 / self.secPerTick)) + self.tickCounter
    if int(deltaTime * (1 / self.secPerTick)) == 0:
        nextGenTick = self.tickCounter
    #print "next packet generating at {0}".format(nextGenTick)
    return nextGenTick
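# A standalone sketch of the inverse-transform idea used above: -ln(1-U)/rate
# is exponentially distributed with the given rate, so the sample mean should
# approach 1/rate; the rate and sample count here are illustrative.
import random
from math import log as ln

rate = 100.0  # packets per second
samples = [-ln(1 - random.uniform(0, 1)) / rate for _ in range(100000)]
print(sum(samples) / len(samples))  # ~0.01 = 1/rate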
def __pow__(self, o):
    if o.__class__ == value:
        # first-order error propagation for x**y:
        # var = (y*x**(y-1))**2 * var_x + (ln(x)*x**y)**2 * var_y
        # (math.ln does not exist, and the second sensitivity presumably
        # needs to be squared like the first)
        return value(
            self.val ** o.val,
            self.var * o.val ** 2 * self.val ** (2 * (o.val - 1))
            + o.var * (math.log(self.val) * self.val ** o.val) ** 2,
            False,
        )
    else:
        return value(self.val ** float(o),
                     self.var * float(o) ** 2 * self.val ** (2 * (float(o) - 1)),
                     False)
def f47(V, a, b, c, d, e, f):
    V = float(V)
    a = float(a)
    b = float(b)
    c = float(c)
    d = float(d)
    e = float(e)
    f = float(f)
    EF = exp(a + (b / V)) + (c * ln(V))
    return EF
def instance_var(var):
    if type(var) in (int, long, float, str):
        return var
    if var.scale == LOG_SCALE:
        low, high = ln(var.low), ln(var.high)  # to log scale
    else:
        low, high = var.low, var.high
    if var.dist == UNIFORM:
        val = low + random() * (high - low)
    else:
        raise NotImplementedError(
            "failure on probability distribution {d}".format(d=var.dist))
    if var.scale == LOG_SCALE:
        val = exp(val)  # reverse log
    if var.type == int:
        val += 0.5
    return var.type(val)
def GetPeers(uid):
    initlist = GetMFollowers(uid)
    lufollowers = ln(cursor.execute(
        "SELECT followers_count FROM user WHERE id = " + str(uid)).fetchall()[0][0])
    proclist = []
    for i in range(min([250, len(initlist)])):
        cursor.execute("SELECT EXISTS(SELECT id FROM user WHERE id = " + str(initlist[i][0]) + ")")
        if cursor.fetchall()[0][0] > 0:
            procresult = cursor.execute(
                "SELECT id, followers_count, 0 FROM user WHERE id = " + str(initlist[i][0])).fetchall()[0]
            if len(procresult) > 0:
                proclist.append(procresult)
    rawset = []
    for i in range(len(proclist)):
        tluf = ln(proclist[i][1])
        if tluf >= 0.95 * lufollowers and tluf <= 1.05 * lufollowers:
            rawset.append([proclist[i][0], proclist[i][1], tluf])
    resultset = []
    for i in range(len(rawset)):
        resultset.append([rawset[i][0], ])
    return resultset
def f31(V, a, b, c, d, e, f):
    V = float(V)
    a = float(a)
    b = float(b)
    c = float(c)
    d = float(d)
    e = float(e)
    f = float(f)
    EF = a + (b / (1 + exp((((-1) * c) + (d * ln(V))) + (e * V))))
    return EF
def convergence_rate(u_exact, f, u_D, kappa):
    """
    Compute convergence rates for various error norms for a
    sequence of meshes and elements.
    """
    h = {}  # discretization parameter: h[degree][level]
    E = {}  # error measure(s): E[degree][level][error_type]
    degrees = 1, 2, 3, 4
    num_levels = 5

    # Iterate over degrees and mesh refinement levels
    for degree in degrees:
        n = 4  # coarsest mesh division
        h[degree] = []
        E[degree] = []
        for i in range(num_levels):
            n *= 2
            h[degree].append(1.0 / n)
            u = solver(kappa, f, u_D, n, n, degree, linear_solver='direct')
            errors = compute_errors(u_exact, u)
            E[degree].append(errors)
            print('2 x (%d x %d) P%d mesh, %d unknowns, E1=%g' %
                  (n, n, degree, u.function_space().dim(), errors['u - u_exact']))

    # Compute convergence rates
    from math import log as ln  # log is a fenics name too
    error_types = list(E[1][0].keys())
    rates = {}
    for degree in degrees:
        rates[degree] = {}
        for error_type in sorted(error_types):
            rates[degree][error_type] = []
            # each rate compares a level with the previous one, so start at 1
            # (the original iterated over an undefined num_meshes from 0)
            for i in range(1, num_levels):
                Ei = E[degree][i][error_type]
                Eim1 = E[degree][i-1][error_type]
                r = ln(Ei / Eim1) / ln(h[degree][i] / h[degree][i-1])
                rates[degree][error_type].append(round(r, 2))
    return rates
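# A tiny numeric illustration of the rate formula used above: halving h while
# the error drops by roughly 4x gives r ~ 2, i.e. second-order convergence.
# The error values are made up for the illustration.
from math import log as ln

h_pair, E_pair = (0.1, 0.05), (4.0e-3, 1.01e-3)
print(round(ln(E_pair[1] / E_pair[0]) / ln(h_pair[1] / h_pair[0]), 2))  # ~1.99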
boost_2012_cl_to_miscl = {
    "<2": ["B", "E"],
    "<4": ["C", "B", "E"],
    "<6": ["C"],
    ">2": ["A", "C", "D"],
    ">4": ["A", "D"],
    ">6": ["A", "B", "D", "E"],
}

#1 round
def adaboost_0_getargs():  #TEST 27
    return [boost_2012_tr_pts, boost_2012_cl_to_miscl, True, 0, 1]

adaboost_0_expected = [("<6", .5 * ln(4))]

def adaboost_0_testanswer(val, original_val=None):
    return classifier_approx_equal(val, adaboost_0_expected)

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=adaboost_0_getargs,
          testanswer=adaboost_0_testanswer,
          expected_val=str(adaboost_0_expected),
          name='adaboost')

#2 rounds
def adaboost_1_getargs():  #TEST 28
def transform_vmaf(vmaf):
    if vmaf < 99.99:
        return -ln(1 - vmaf / 100)
    else:
        # return -ln(1 - 99.99 / 100)
        return 9.210340371976184
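# Illustrative values for the VMAF transform above: the mapping stretches the
# top of the scale so quality gains near 100 count for more.
from math import log as ln

for v in (50, 90, 99, 99.99):
    print(v, round(-ln(1 - v / 100), 4))
# 50 -> 0.6931, 90 -> 2.3026, 99 -> 4.6052, and the 99.99 cap -> ~9.2103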
def convertToInches(self, value):
    inches = 0 if value <= 0 else -5.07243 * ln(0.0000185668 * value)
    return inches
def compute_kettle_vaporizer_purchase_price(A, CE):
    return exp(12.3310 - 0.8709*ln(A) + 0.09005 * ln(A)**2) * CE / 567
def compute_double_pipe_purchase_price(A, CE):
    return exp(7.2718 + 0.16*ln(A)) * CE / 567
def calculatingW0(w, mean_vector, prior_probability, covariance_matrix):
    W0 = np.matmul(np.array(mean_vector), np.transpose(np.array(w))) / (-2)
    W0 -= float(ln(np.linalg.det(np.array(covariance_matrix))) / 2)
    W0 += ln(prior_probability)
    return W0
def compute_u_tube_purchase_price(A, CE):
    return exp(11.5510 - 0.9186*ln(A) + 0.09790 * ln(A)**2) * CE / 567
def f12(x):
    if x < 45:
        return exp_cast(ln(abs(sin(x))) + 56 * x)
    if x >= 91:
        return exp_cast(pow(x, 6) - pow(x, 4))
    return exp_cast(7 * pow((x - 17 * pow(x, 4)), 3) + sin(x))
def funcDistributionExponentialInverse(x):
    # math.ln does not exist (math.log is the natural log), and the original
    # body used an undefined name `a` where the parameter x was presumably
    # meant: this is the exponential inverse CDF, -ln(1 - x) * mean
    result = math.log(1 / (1 - x)) * arithmeticMean
    return result
def gen_events():
    e = 0
    for _ in range(sample_size):
        e += -ln(uniform(0, 1))
        yield e
def years(P, A, r):
    return ln(A / P) / ln(1 + r / 100)
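# Example for years() above: doubling money at 5% annual interest takes
# ln(2)/ln(1.05) ~ 14.2 years; the rule-of-72 estimate (72/5 = 14.4) agrees.
# years() assumes ln is the math.log alias used throughout this corpus.
from math import log as ln

print(round(years(1000.0, 2000.0, 5.0), 2))  # ~14.21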
def test_A():
    A20 = A_dh(80.1, 293.15, 998.2071) / ln(10)
    assert abs(A20 - 0.50669) < 1e-5
""" The task: https://stepik.org/lesson/165493/step/5?unit=140087 """ from selenium import webdriver import time from math import log as ln, sin tested_link = "http://suninjuly.github.io/math.html" try: browser = webdriver.Chrome() browser.get(tested_link) x = int(browser.find_element_by_id("input_value").text) result = ln(abs(12 * sin(x))) input_field = browser.find_element_by_id("answer") input_field.send_keys(str(result)) checkbox = browser.find_element_by_css_selector("[for='robotCheckbox']") checkbox.click() radio = browser.find_element_by_id("robotsRule") radio.click() submit = browser.find_element_by_css_selector("button.btn") submit.click() finally: time.sleep(5)
def placeholder_weibullvariate(alpha, beta):
    return alpha * ln(2)**(1 / beta)
def main(N, degree=3):
    # (function header reconstructed from the driver loop below, which
    # calls main(n, degree=degree); the body was a headless fragment)
    mesh = generate_mesh(Ellipse(Point(c), a, b, N), N)
    V = FunctionSpace(mesh, 'CG', degree)
    u = TrialFunction(V)
    v = TestFunction(V)
    F = inner(grad(u), grad(v))*dx + 1/mu*dpdx*v*dx
    bc = DirichletBC(V, Constant(0), DomainBoundary())
    u_ = Function(V)
    solve(lhs(F) == rhs(F), u_, bcs=bc)
    #u_e = interpolate(u_exact(), V)
    u_e = interpolate(u_c, V)
    bc.apply(u_e.vector())
    u_error = errornorm(u_e, u_, degree_rise=0)
    if N == 5 or N == 20 or N == 80:
        plot(u_, title="Numerical")
        plot(u_e, title="Exact")
        interactive()
    return u_error, mesh.hmin()

E = []; h = []; degree = 3
for n in [5, 10, 20, 40, 80]:
    ei, hi = main(n, degree=degree)
    E.append(ei)
    h.append(hi)
for i in range(1, len(E)):
    r = ln(E[i]/E[i-1]) / ln(h[i]/h[i-1])
    print "h=%2.2E E=%2.2E r=%.2f" % (h[i], E[i], r)
for trial in range(0, len(Uvariable)):
    Wvariable.append(h(Uvariable[trial]))

numBins = 100
plt.hist(Wvariable, numBins, normed=1, facecolor='green', alpha=0.75)
plt.show()

Unknown1 = []
Unknown2 = []
for trial in range(0, TrialNumber):
    Uvariable1 = random.random()
    Uvariable2 = random.random()
    # Box-Muller transform. Fixes to the original: math.ln/math.PI do not
    # exist (log/pi), the list name was spelled three different ways, the
    # cos/sin factor belongs outside the sqrt, and the second variate
    # presumably uses sin so the pair is independent.
    Unknown1.append(
        math.sqrt(-2 * math.log(Uvariable1)) * math.cos(2 * math.pi * Uvariable2))
    Unknown2.append(
        math.sqrt(-2 * math.log(Uvariable1)) * math.sin(2 * math.pi * Uvariable2))

numBins = 100
plt.hist(Unknown1, numBins, normed=1, facecolor='green', alpha=0.75)
plt.show()
plt.clf()

numBins = 100
plt.hist(Unknown2, numBins, normed=1, facecolor='green', alpha=0.75)
plt.show()
'''
1. What is the type of random variable Unknown1?
2. What is its mean and variance?
def show_exponential_dist(sample_size, bins):
    plt.hist([-ln(uniform(0, 1)) for _ in range(sample_size + 1)], bins)
    plt.show()
def p(n, N):
    """ Relative abundance """
    if n == 0:  # `is 0` relied on int interning; use ==
        return 0
    else:
        return (float(n) / N) * ln(float(n) / N)
HDFS_SERVICE_CONFIG = {
    'dfs_replication': DFS_REPLICATION,
    'dfs_block_size': '268435456',
    'dfs_block_local_path_access_user': '******',
    'hdfs_service_env_safety_valve': 'HADOOP_CLASSPATH=$HADOOP_CLASSPATH:' + DTAP_JAR,
}
HDFS_NAMENODE_SERVICE_NAME = "nn"
CMD_TIMEOUT = 1800
HDFS_NAME_SERVICE = UNIQUE_NAME

# handler count scales with ln(number of datanodes), with a floor of 30
NAMENODE_HANDLER_COUNT = max(int(ln(len(HDFS_DATANODE_HOSTS)) * 20), 30)
HDFS_NAMENODE_CONFIG = {
    'dfs_name_dir_list': HADOOP_DATA_DIR + '/namenode',
    'dfs_namenode_handler_count': NAMENODE_HANDLER_COUNT,
    'dfs_namenode_service_handler_count': NAMENODE_HANDLER_COUNT,
}
HDFS_SECONDARY_NAMENODE_CONFIG = {
    'fs_checkpoint_dir_list': HADOOP_DATA_DIR + '/namesecondary',
}
HDFS_DATANODE_CONFIG = {
    'dfs_data_dir_list': HADOOP_DATA_DIR + '/datanode',
    'dfs_datanode_data_dir_perm': 755,
    'dfs_datanode_du_reserved': '1073741824',
}
def gammaln(n):
    # math.ln does not exist; math.log is the natural log
    # (for large n, math.lgamma(n) computes this directly without overflow)
    return math.log(factorial(n - 1))
def compute_fixed_head_purchase_price(A, CE):
    return exp(11.4185 - 0.9228*ln(A) + 0.09861 * ln(A)**2) * CE / 567
def gamma(components, temperature, fractions):
    cs = components
    T = temperature
    x = fractions
    # replace zero fractions with a small value (the original for-loop only
    # rebound its loop variable and never changed x)
    x = [item if item != 0 else 1E-05 for item in x]
    # Get Q and R values for groups
    groupi = []
    groupk = {}
    ip = {}
    file_path = "Models\\unifac.txt"
    with open(file_path, 'r') as f:
        lines = f.readlines()
    for i in range(0, len(cs)):
        groups = cs[i].UnifacVLE
        rk_data = []
        for pair in groups:
            for line in lines:
                aux = line.split(',')
                if aux[1] == str(pair[0]):
                    ip[pair[0]] = int(aux[0])
                    if pair[0] in groupk.keys():
                        groupk[pair[0]][0].append((i, pair[1]))
                    else:
                        groupk[pair[0]] = ([(i, pair[1])], float(aux[4]), float(aux[5]))
                    rk_data.append((pair[0], pair[1], float(aux[4]), float(aux[5])))
                    break
        groupi.append(rk_data)
    #groupk = {17: ([(0, 1)], 0.92, 1.4), 1: ([(1, 1)], 0.9011, 0.848), 2: ([(1, 1)], 0.6744, 0.54), 15: ([(1, 1)], 1.0, 1.2)}

    # Calculate r and q values for components
    r = []
    q = []
    for i in range(0, len(cs)):
        ri = 0
        qi = 0
        for data in groupi[i]:
            ri += data[1] * data[2]
            qi += data[1] * data[3]
        r.append(ri)
        q.append(qi)

    # Calculation of residual and combinatorial parts
    # ln gamma_k = Qk*[1 - ln(sum(tetai*taui,k)) - sum[(tetai*taui,m)/sum(tetaj*tauj,m)]]
    # Calculate activity coefficients for each group
    group_names = []
    # Get group numbers
    for key in groupk.keys():
        group_names.append(key)

    def X(k):
        """Calculates group fraction for k"""
        aux_group = groupk[k]
        aux1 = 0
        aux2 = 0
        for item in aux_group[0]:  # Item = (i, vi)
            vk = item[1]
            i = item[0]
            aux1 += vk * x[i]
        for index in group_names:
            aux_grp = groupk[index][0]
            for itm in aux_grp:
                aux2 += x[itm[0]] * itm[1]
        return aux1 / aux2

    def tau(m, n):
        if m == n:
            return 1
        else:
            file_name = "Models\\unifac_ip.txt"
            found = False
            m = ip[m]
            n = ip[n]
            with open(file_name, 'r') as f:
                lines = f.readlines()
            for line in lines:
                line = line.split("\t")
                if int(line[0]) == m and int(line[2]) == n:
                    aij = float(line[4])
                    found = True
                elif int(line[0]) == n and int(line[2]) == m:
                    # the original repeated the first condition here, making
                    # this branch unreachable; the reversed pair presumably
                    # reads the a_ji column
                    aij = float(line[5])
                    found = True
            if found:
                return exp(-aij / T)
            else:
                print("WARNING! No UNIFAC interaction parameters were found for groups", m, n)
                return exp(-50 / T)  # default value

    taus = {}
    for m in group_names:
        for n in group_names:
            taus[(m, n)] = tau(m, n)

    Xk = []  # Calculate and store Xk values
    for k in group_names:
        Xk.append(X(k))

    Xi = []  # Calculate and store Xk values for pure components

    def X2(k, xi):
        """Calculates group fraction for k"""
        aux_group = groupk[k]
        aux1 = 0
        aux2 = 0
        for item in aux_group[0]:  # Item = (i, vi)
            vk = item[1]
            i = item[0]
            aux1 += vk * xi[i]
        for index in group_names:
            aux_grp = groupk[index][0]
            for itm in aux_grp:
                aux2 += xi[itm[0]] * itm[1]
        return aux1 / aux2

    def teta(k):
        """Teta value for group m"""
        Qk = groupk[k][2]
        kk = group_names.index(k)
        aux = 0
        for n in group_names:
            nk = group_names.index(n)
            Qn = groupk[n][2]
            aux += Qn * Xk[nk]
        tet = groupk[k][2] * Xk[kk] / aux
        return tet

    t5 = time.process_time()
    for i in range(0, len(cs)):
        ki = []
        for k in group_names:
            xi = x.copy()
            for j in range(0, len(xi)):
                if i == j:
                    xi[j] = 1
                else:
                    xi[j] = 0
            ki.append(X2(k, xi))
        Xi.append(ki)

    def tetai(k, i):
        """Teta value for group m in pure component"""
        Qk = groupk[k][2]
        kk = group_names.index(k)
        aux = 0
        for n in group_names:
            nk = group_names.index(n)
            Qn = groupk[n][2]
            aux += Qn * Xi[i][nk]
        teti = groupk[k][2] * Xi[i][kk] / aux
        return teti

    teta_k = []
    teta_ki = []
    for i in range(0, len(cs)):
        pure_k = []
        for k in group_names:
            pure_k.append(tetai(k, i))
        teta_ki.append(pure_k)
    for k in group_names:
        teta_k.append(teta(k))

    activity_R = []  # Residual part for activity coefficient ln gammaR
    for i in range(0, len(cs)):
        ln_gamma_R = 0
        for k in group_names:
            vk = 0
            for t in groupk[k][0]:
                if t[0] == i:
                    vk = t[1]
            Qk = groupk[k][2]
            kk = group_names.index(k)
            nom = 0
            aux = 0
            nom_i = 0
            aux_i = 0
            for m in group_names:
                denom_i = 0
                denom = 0
                mm = group_names.index(m)
                for n in group_names:
                    nn = group_names.index(n)
                    denom += teta_k[nn] * taus[(n, m)]
                    denom_i += teta_ki[i][nn] * taus[(n, m)]
                nom += teta_k[mm] * taus[(k, m)] / denom
                aux += teta_k[mm] * taus[(m, k)]
                nom_i += teta_ki[i][mm] * taus[(k, m)] / denom_i
                aux_i += teta_ki[i][mm] * taus[(m, k)]
            ln_gamma_k = Qk * (1 - ln(aux) - nom)
            ln_gamma_ki = Qk * (1 - ln(aux_i) - nom_i)
            ln_gamma_R += vk * (ln_gamma_k - ln_gamma_ki)
        activity_R.append(ln_gamma_R)

    activity_C = []  # Gamma combinatorial for components
    V = []
    F = []
    for i in range(0, len(cs)):
        aux_r = 0
        aux_q = 0
        for j in range(0, len(cs)):
            aux_r += r[j] * x[j]
            aux_q += q[j] * x[j]
        V.append(r[i] / aux_r)
        F.append(q[i] / aux_q)
    for i in range(0, len(cs)):
        aux = 1 - V[i] + ln(V[i]) - 5 * q[i] * (1 - V[i] / F[i] + ln(V[i] / F[i]))
        activity_C.append(aux)

    activity_coefficients = []
    for i in range(0, len(cs)):
        activity_coefficients.append(exp(activity_C[i] + activity_R[i]))
    return activity_coefficients
def exponential(λ=1.0):
    # note: since it multiplies the draw, λ here acts as the scale (mean)
    # of the distribution, i.e. 1/rate
    return -ln(uniform(0, 1)) * λ
def compute_floating_head_purchase_price(A, CE):
    return exp(12.0310 - 0.8709*ln(A) + 0.09005 * ln(A)**2) * CE / 567
def test_limiting_log_gamma():
    A20 = A_dh(80.1, 293.15, 998.2071) / ln(10)
    log_gamma = limiting_log_gamma(0.4, -3, A20)
    assert abs(log_gamma + 2.884130) < 1e-4
def _decay_constant(self):
    """Calculates the decay constant ln(2)/half_life and scales it by the
    timestep, i.e. the probability that a nucleus decays in one timestep.
    """
    decay_constant = ln(2) / self._half_life
    return decay_constant * self._timestep
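# Example with round numbers: a half-life of 10 time units gives a decay
# constant ln(2)/10 ~ 0.0693, so with a timestep of 1 each nucleus decays
# with probability ~6.93% per step; the values are illustrative.
from math import log as ln

half_life, timestep = 10.0, 1.0
print(ln(2) / half_life * timestep)  # ~0.0693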
def chi2_spamprob(self, wordstream, evidence=False):
    """Return best-guess probability that wordstream is spam.

    wordstream is an iterable object producing words.
    The return value is a float in [0.0, 1.0].

    If optional arg evidence is True, the return value is a pair
        probability, evidence
    where evidence is a list of (word, probability) pairs.
    """
    from math import frexp, log as ln

    # We compute two chi-squared statistics, one for ham and one for
    # spam.  The sum-of-the-logs business is more sensitive to probs
    # near 0 than to probs near 1, so the spam measure uses 1-p (so
    # that high-spamprob words have greatest effect), and the ham
    # measure uses p directly (so that lo-spamprob words have greatest
    # effect).
    #
    # For optimization, sum-of-logs == log-of-product, and f.p.
    # multiplication is a lot cheaper than calling ln().  It's easy
    # to underflow to 0.0, though, so we simulate unbounded dynamic
    # range via frexp.  The real product H = this H * 2**Hexp, and
    # likewise the real product S = this S * 2**Sexp.
    H = S = 1.0
    Hexp = Sexp = 0

    clues = self._getclues(wordstream)
    for prob, word, record in clues:
        S *= 1.0 - prob
        H *= prob
        if S < 1e-200:  # prevent underflow
            S, e = frexp(S)
            Sexp += e
        if H < 1e-200:  # prevent underflow
            H, e = frexp(H)
            Hexp += e

    # Compute the natural log of the product = sum of the logs:
    # ln(x * 2**i) = ln(x) + i * ln(2).
    S = ln(S) + Sexp * LN2
    H = ln(H) + Hexp * LN2

    n = len(clues)
    if n:
        S = 1.0 - chi2Q(-2.0 * S, 2 * n)
        H = 1.0 - chi2Q(-2.0 * H, 2 * n)

        # How to combine these into a single spam score?  We originally
        # used (S-H)/(S+H) scaled into [0., 1.], which equals S/(S+H).  A
        # systematic problem is that we could end up being near-certain
        # a thing was (for example) spam, even if S was small, provided
        # that H was much smaller.
        # Rob Hooft stared at these problems and invented the measure
        # we use now, the simpler S-H, scaled into [0., 1.].
        prob = (S - H + 1.0) / 2.0
    else:
        prob = 0.5

    if evidence:
        clues = [(w, p) for p, w, _r in clues]
        clues.sort(lambda a, b: cmp(a[1], b[1]))
        clues.insert(0, ('*S*', S))
        clues.insert(0, ('*H*', H))
        return prob, clues
    else:
        return prob
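# A minimal sketch of the frexp trick used above: keep a running product as
# mantissa * 2**exponent so it never underflows to 0.0, then recover the
# log-of-product at the end; the tiny probabilities here are illustrative.
from math import frexp, log as ln

LN2 = ln(2)
prod, exp_acc = 1.0, 0
for p in [1e-120] * 10:  # the naive product would underflow to 0.0
    prod *= p
    if prod < 1e-200:
        prod, e = frexp(prod)  # prod == mantissa * 2**e, 0.5 <= mantissa < 1
        exp_acc += e
print(ln(prod) + exp_acc * LN2)  # == sum of ln(p) ~ -2763.1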
def shannon(n, N):
    """ Relative abundance """
    if n == 0:
        return 0
    else:
        return (float(n) / N) * ln(float(n) / N)
from math import log as ln

file = open("base_exp.txt")
z = [line.split(',') for line in file]
file.close()
for a in z:
    a[0] = int(a[0])
    a[1] = int(a[1])

# Compare base**exp pairs via exp * ln(base); print the (1-based) line number
# each time a new maximum is found -- the last number printed is the answer.
m = 0
l = 1
for a in z:
    if a[1] * ln(a[0]) > m:
        m = a[1] * ln(a[0])
        print(l)
    l += 1
        'u - interpolate(u_e,V)': E2,
        'interpolate(u,Ve) - interpolate(u_e,Ve)': E3,
        'error field': E4,
        'infinity norm (of dofs)': E5,
        'grad(error field)': E6,
    }
    return errors

# Perform experiments
degree = int(sys.argv[1])
h = []  # element sizes
E = []  # errors
# Changed this line so unit tests run faster
for nx in [4, 8, 16]:  # for nx in [4, 8, 16, 32, 64, 128, 264]:
    h.append(1.0 / nx)
    E.append(compute(nx, nx, degree))  # list of dicts

# Convergence rates
from math import log as ln  # log is a dolfin name too
error_types = list(E[0].keys())
for error_type in sorted(error_types):
    print('\nError norm based on', error_type)
    for i in range(1, len(E)):
        Ei = E[i][error_type]  # E is a list of dicts
        Eim1 = E[i - 1][error_type]
        r = ln(Ei / Eim1) / ln(h[i] / h[i - 1])
        print('h=%8.2E E=%8.2E r=%.2f' % (h[i], Ei, r))