def findNthPolygonalNumber( n, k ):
    if real_int( k ) < 3:
        raise ValueError( 'the number of sides of the polygon cannot be less than 3' )

    return nint( fdiv( fsum( [ sqrt( fsum( [ power( k, 2 ), fprod( [ 8, k, real( n ) ] ),
                                             fneg( fmul( 8, k ) ), fneg( fmul( 16, n ) ), 16 ] ) ),
                               k, -4 ] ),
                       fmul( 2, fsub( k, 2 ) ) ) )
def calculateAntiharmonicMean( args ):
    if isinstance( args, RPNGenerator ):
        return calculateAntiharmonicMean( list( args ) )
    elif isinstance( args, list ):
        if isinstance( args[ 0 ], ( list, RPNGenerator ) ):
            return [ calculateAntiharmonicMean( list( arg ) ) for arg in args ]
        else:
            return fdiv( fsum( args, squared=True ), fsum( args ) )
    else:
        return args
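# Illustration (not part of the snippet above): the antiharmonic
# (contraharmonic) mean is the sum of squares divided by the plain sum.
# A minimal check using mpmath directly; fsum's squared=True flag is the
# same one used by calculateAntiharmonicMean.
import mpmath

values = [1, 2, 3]
print(mpmath.fsum(values, squared=True) / mpmath.fsum(values))  # 14/6 ~= 2.3333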
def getStandardDeviation( args ):
    if isinstance( args, RPNGenerator ):
        return getStandardDeviation( list( args ) )
    elif isinstance( args[ 0 ], ( list, RPNGenerator ) ):
        return [ getStandardDeviation( arg ) for arg in args ]

    mean = fsum( args ) / len( args )

    dev = [ power( fsub( i, mean ), 2 ) for i in args ]
    return sqrt( fsum( dev ) / len( dev ) )
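# Minimal self-contained check of the population standard deviation
# computed above (illustration only); the helpers used by
# getStandardDeviation (fsum, fsub, power, sqrt) are mpmath functions.
from mpmath import fsum, fsub, power, sqrt

def population_std(values):
    mean = fsum(values) / len(values)                   # arithmetic mean
    deviations = [power(fsub(v, mean), 2) for v in values]
    return sqrt(fsum(deviations) / len(deviations))     # population, not sample

print(population_std([2, 4, 4, 4, 5, 5, 7, 9]))  # 2.0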
def test_gth_solve_randmatrix():
    N = 5
    for j in range(10):
        A = mp.randmatrix(N, N)
        for i in range(N):
            A[i, :] /= mp.fsum(A[i, :])
            A[i, i] = -mp.fsum((A[i, j] for j in range(N) if j != i))
        run_gth_solve(A, verbose=VERBOSE)
def step(array):
    global _INFILE
    global _BETA
    global _NSTEP
    beta = _BETA
    old_positions, Ntides, is_3prime = array
    internal = Aptamer("leaprc.ff12SB", _INFILE)
    identifier = Ntides.replace(" ", "")
    internal.sequence(identifier, Ntides.strip())
    internal.unify(identifier)
    internal.command("saveamberparm union %s.prmtop %s.inpcrd" % (identifier, identifier))
    time.sleep(2)
    #print("WhereamI?")
    print("Identifier: " + Ntides)
    volume = (2 * math.pi)**5
    aptamer_top = app.AmberPrmtopFile("%s.prmtop" % identifier)
    aptamer_crd = app.AmberInpcrdFile("%s.inpcrd" % identifier)
    # print("loaded")

    # if is_3prime == 1:
    en_pos = [mcmc_sample(aptamer_top, aptamer_crd, old_positions, index, Nsteps=_NSTEP)
              for index in range(10)]
    # else:
    #     en_pos_task = [mcmc_sample_five(aptamer_top, aptamer_crd, old_positions, index, Nsteps=200)
    #                    for index in range(20)]
    #     barrier()
    #     en_pos = value(en_pos_task)

    en = []
    positions = []
    positions_s = []
    for elem in en_pos:
        en += elem[0]
        #print(elem[2], elem[1])
        positions_s.append([elem[2], elem[1]])
    positions = min(positions_s)[1]

    fil = open("best_structure%s.pdb" % Ntides, "w")
    app.PDBFile.writeModel(aptamer_top.topology, positions, file=fil)
    fil.close()
    del fil

    Z = volume * math.fsum([math.exp(-beta * elem) for elem in en]) / len(en)
    P = [math.exp(-beta * elem) / Z for elem in en]
    S = volume * math.fsum([-elem * math.log(elem * volume) for elem in P]) / len(P)
    print("%s : %s" % (Ntides, S))
    return positions, Ntides, S
def getNthNonagonalSquareNumber( n ):
    if real( n ) < 0:
        raise ValueError( 'non-negative argument expected' )

    p = fsum( [ fmul( 8, sqrt( 7 ) ), fmul( 9, sqrt( 14 ) ), fmul( -7, sqrt( 2 ) ), -28 ] )
    q = fsum( [ fmul( 7, sqrt( 2 ) ), fmul( 9, sqrt( 14 ) ), fmul( -8, sqrt( 7 ) ), -28 ] )
    sign = power( -1, real_int( n ) )

    index = fdiv( fsub( fmul( fadd( p, fmul( q, sign ) ),
                              power( fadd( fmul( 2, sqrt( 2 ) ), sqrt( 7 ) ), n ) ),
                        fmul( fsub( p, fmul( q, sign ) ),
                              power( fsub( fmul( 2, sqrt( 2 ) ), sqrt( 7 ) ), fsub( n, 1 ) ) ) ), 112 )

    return nint( power( nint( index ), 2 ) )
def dpint(f, snr):
    '''Integrand of the detection probability of single sources. Since it
    contains a modified Bessel function, which takes very large values, it
    has to be evaluated in a special way (summed in log space).'''
    big = mpmath.log(mpmath.besseli(1, snr * np.sqrt(2. * f)))
    small = mpmath.mpf(-f - 0.5 * snr**2.)
    normal = mpmath.log(np.sqrt(2. * f) * 1. / snr)
    result = mpmath.exp(mpmath.fsum([big, small, normal]))
    # In the end the result should be between 0 and some sizeable number,
    # so a float should be enough.
    return float(result)
def sf(self, z):
    """
    Compute the survival function of the truncated distribution

    Parameters
    ----------
    z : float
        Minimum bound of the interval

    Returns
    -------
    sf : float
        The survival function of the truncated distribution
        sf(z) = P( X > z | X is in intervals )
    """
    intervals = self.intervals
    Q, sumQ = self._Q, self._sumQ
    N = len(Q)
    dps = self._dps

    k, (a, b) = min((k, (a, b)) for k, (a, b) in enumerate(intervals) if b > z)

    sf = fsum(Q[k+1:]) + self._cdf_notTruncated(max(a, z), b, dps)
    sf /= sumQ

    return sf
def getNthKFibonacciNumber( n, k ):
    if real( n ) < 0:
        raise ValueError( 'non-negative argument expected' )

    if real( k ) < 2:
        raise ValueError( 'argument >= 2 expected' )

    if n < k - 1:
        return 0

    nth = int( n ) + 4

    precision = int( fdiv( fmul( n, k ), 8 ) )

    if mp.dps < precision:
        mp.dps = precision

    poly = [ 1 ]
    poly.extend( [ -1 ] * int( k ) )

    roots = polyroots( poly )
    nthPoly = getNthFibonacciPolynomial( k )

    result = 0
    exponent = fsum( [ nth, fneg( k ), -2 ] )

    for i in range( 0, int( k ) ):
        result += fdiv( power( roots[ i ], exponent ), polyval( nthPoly, roots[ i ] ) )

    return floor( fadd( re( result ), fdiv( 1, 2 ) ) )
def calculateWindChill( measurement1, measurement2 ):
    validUnitTypes = [
        [ 'velocity', 'temperature' ],
    ]

    arguments = matchUnitTypes( [ measurement1, measurement2 ], validUnitTypes )

    if not arguments:
        raise ValueError( '\'wind_chill\' requires velocity and temperature measurements' )

    wind_speed = arguments[ 'velocity' ].convert( 'miles/hour' ).value
    temperature = arguments[ 'temperature' ].convert( 'degrees_F' ).value

    if wind_speed < 3:
        raise ValueError( '\'wind_chill\' is not defined for wind speeds less than 3 mph' )

    if temperature > 50:
        raise ValueError( '\'wind_chill\' is not defined for temperatures over 50 degrees fahrenheit' )

    result = fsum( [ 35.74, fmul( temperature, 0.6215 ),
                     fneg( fmul( 35.75, power( wind_speed, 0.16 ) ) ),
                     fprod( [ 0.4275, temperature, power( wind_speed, 0.16 ) ] ) ] )

    # in case someone puts in a silly velocity
    if result < -459.67:
        result = -459.67

    return RPNMeasurement( result, 'degrees_F' ).convert( arguments[ 'temperature' ].units )
def invFourier(x, F):
    k1 = r2 * mpm.pi
    f = []
    for xi in x:
        kx = k1 * (xi / Lx)
        f.append(mpm.fsum(F[ki] * mpm.cos(kx * mpm.mpf(ki))
                          for ki in xrange(len(F))))
    return f
def findCenteredPolygonalNumber( n, k ):
    if real_int( k ) < 3:
        raise ValueError( 'the number of sides of the polygon cannot be less than 3' )

    s = fdiv( k, 2 )

    return nint( fdiv( fadd( sqrt( s ),
                             sqrt( fsum( [ fmul( 4, real( n ) ), s, -4 ] ) ) ),
                       fmul( 2, sqrt( s ) ) ) )
def getMultinomial( args ):
    numerator = fac( fsum( args ) )

    denominator = 1

    for arg in args:
        denominator = fmul( denominator, fac( arg ) )

    return fdiv( numerator, denominator )
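# Minimal sketch of the same multinomial computation with plain mpmath
# (illustration only; fac, fsum, fdiv, fmul are mpmath functions here).
# The multinomial coefficient for [2, 2, 2] is 6! / (2! * 2! * 2!) = 90.
from mpmath import fac, fsum, fdiv, fmul

def multinomial(counts):
    numerator = fac(fsum(counts))                # (n1 + n2 + ... + nk)!
    denominator = 1
    for c in counts:
        denominator = fmul(denominator, fac(c))  # n1! * n2! * ... * nk!
    return fdiv(numerator, denominator)

print(multinomial([2, 2, 2]))  # 90.0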
def within(self, distance):
    if self._within is None:
        # s_w is the sum of within-cluster (point to its medoid) distances.
        m = self.medoid
        s_w = fsum(distance(i, m) for i in self.members if i != m)
        n_w = len(self.members)
        self._within = (s_w, n_w)
    return self._within
def steady_state_eig(_matrix):
    values, vectors = eig(_matrix)
    real_values = []
    for num in values:
        real_values.append(num.real)
    # want the largest numbers because they should all be negative
    largest_eig_values = two_largest(real_values)
    _index = real_values.index(max(real_values))
    steady_state_raw = vectors[:, _index]
    steady_state = steady_state_raw / fsum(steady_state_raw)
    return steady_state, largest_eig_values
def getNthNonagonalTriangularNumber( n ):
    a = fmul( 3, sqrt( 7 ) )
    b = fadd( 8, a )
    c = fsub( 8, a )

    return nint( fsum( [ fdiv( 5, 14 ),
                         fmul( fdiv( 9, 28 ), fadd( power( b, real_int( n ) ), power( c, n ) ) ),
                         fprod( [ fdiv( 3, 28 ), sqrt( 7 ),
                                  fsub( power( b, n ), power( c, n ) ) ] ) ] ) )
def test_stoch_eig_randmatrix():
    N = 5
    for j in range(10):
        P = mp.randmatrix(N, N)
        for i in range(N):
            P[i, :] /= mp.fsum(P[i, :])
        run_stoch_eig(P, verbose=VERBOSE)
def calculateArithmeticMean( args ):
    if isinstance( args, RPNGenerator ):
        return calculateArithmeticMean( list( args ) )
    elif isinstance( args, list ):
        if isinstance( args[ 0 ], ( list, RPNGenerator ) ):
            return [ calculateArithmeticMean( list( arg ) ) for arg in args ]
        else:
            return fdiv( fsum( args ), len( args ) )
    else:
        return args
def Fourier(N, f):
    k1 = r2 * mpm.pi
    x = [-r1/r2 + mpm.mpf(i) / mpm.mpf(2 * N) for i in xrange(2 * N)]
    fx = [f(Lx * xi) for xi in x]
    F = []
    for ki in xrange(N):
        kx = k1 * mpm.mpf(ki)
        F.append(mpm.fsum(fi * mpm.cos(kx * xi) for xi, fi in zip(x, fx)))
        F[-1] /= mpm.mpf(N)
    F[0] /= r2  # scale the first coefficient (mean value)
    return F
def _boxcox_llf_deriv_not_finished(lmb, x):
    """
    This function assumes lmb != 0.
    """
    lmb = mpmath.mpf(lmb)
    x = [mpmath.mpf(t) for t in x]
    n = len(x)
    logdata = [mpmath.log(t) for t in x]
    sumlogdata = mpmath.fsum(logdata)

    mean_deriv_bc_sq = mpmath.fsum(2*(t**lmb/lmb)**2*(mpmath.log(t) - 1/lmb)
                                   for t in x) / n
    mean_bc_data = mpmath.fsum(t**lmb/lmb for t in x) / n
    deriv_mean_bc_data = mpmath.fsum(t**lmb/lmb * (mpmath.log(t) - 1/lmb)
                                     for t in x) / n
    dvar = mean_deriv_bc_sq - 2 * mean_bc_data * deriv_mean_bc_data
    return sumlogdata - n/2 * dvar
def sf(k, nc, ntotal, ngood, nsample):
    """
    Survival function of Fisher's noncentral hypergeometric distribution.
    """
    _hg._validate(ntotal, ngood, nsample)
    sup, p = support(nc, ntotal, ngood, nsample)
    if k < sup[0]:
        return mpmath.mp.one
    elif k >= sup[-1]:
        return mpmath.mp.zero
    else:
        return mpmath.fsum(p[k - sup[0] + 1:])
def _cdf_term(k, x, dfn, dfd, nc):
    halfnc = nc / 2
    halfdfn = dfn / 2
    halfdfd = dfd / 2
    log_coeff = _mp.fsum([k * _mp.log(halfnc), -halfnc, -_mp.loggamma(k + 1)])
    coeff = _mp.exp(log_coeff)
    r = coeff * _mp.betainc(a=halfdfn + k, b=halfdfd,
                            x1=0, x2=dfn * x / (dfd + dfn * x),
                            regularized=True)
    return r
def calculate_RAM_usage(shape):
    """
    Calculate a lower bound on the RAM needed to allocate the arrays for
    efficient_gcf_calculation: 8 arrays of the given shape, full of
    64-bit floats. There are also some lists whose space is hard to
    estimate.

    :param shape: see efficient_gcf_calculation
    :return float: RAM usage in bytes
    """
    c = 200.
    return 8. * mp.fprod(shape) * 8. + c * mp.fsum(shape) * 8.
def getNthDecagonalCenteredSquareNumber( n ):
    sqrt10 = sqrt( 10 )

    dps = 7 * int( real_int( n ) )

    if mp.dps < dps:
        mp.dps = dps

    return nint( floor( fsum( [ fdiv( 1, 8 ),
                                fmul( fdiv( 7, 16 ),
                                      power( fsub( 721, fmul( 228, sqrt10 ) ), fsub( n, 1 ) ) ),
                                fmul( fmul( fdiv( 1, 8 ),
                                            power( fsub( 721, fmul( 228, sqrt10 ) ), fsub( n, 1 ) ) ),
                                      sqrt10 ),
                                fmul( fmul( fdiv( 1, 8 ),
                                            power( fadd( 721, fmul( 228, sqrt10 ) ), fsub( n, 1 ) ) ),
                                      sqrt10 ),
                                fmul( fdiv( 7, 16 ),
                                      power( fadd( 721, fmul( 228, sqrt10 ) ), fsub( n, 1 ) ) ) ] ) ) )
def multi_segment_score_pv(score, num_segments, raw=True):
    """
    Compute p-value for normalized score when considering multiple
    high scoring segments. This function considers the normalized
    score Sr', i.e., the normalized score of the HSP at rank r.
    For r=1, this formula is equivalent to single_segment_score_pv.

    Computes formula [3] in Karlin & Altschul, PNAS 1993:

    Prob(Sr' >= x) ~ 1 - exp(-exp(-x)) * SUM (k=0 ... r - 1) { exp(-kx) / k! }

    Implementation detail: Python's range is not right-inclusive,
    so go up to r, not r - 1, in the summation.

    :param score:
    :param num_segments:
    :param raw: return raw P-value instead of -log10(pv)
    :return:
    """
    with mpm.workprec(NUM_PREC_KA_PV):

        def create_summand(sum_x, k):
            prec_k = mpm.convert(k)
            enum = mpm.exp(mpm.fneg(mpm.fmul(prec_k, sum_x)))
            denom = mpm.factorial(prec_k)
            summand = mpm.fdiv(enum, denom)
            return summand

        x = mpm.convert(score)
        r = num_segments

        complement = mpm.convert('1')
        factor1 = mpm.exp(mpm.fneg(mpm.exp(mpm.fneg(x))))
        factor2 = mpm.fsum(map(lambda k: create_summand(x, k), range(0, r)))
        res = mpm.fsub(complement, mpm.fmul(factor1, factor2))
        if not raw:
            res = mpm.fneg(mpm.log10(res))
        res = float(res)

    # Equivalent implementation using Python standard library:
    #
    # x = score
    # r = num_segments
    # factor_1 = math.exp(-math.exp(-x))
    # factor_2 = math.fsum(map(lambda k: math.exp(-k * x) / math.factorial(k), range(0, r)))
    # res = 1 - factor_1 * factor_2
    # if not raw:
    #     res = -1 * math.log10(res)

    return res
def normm(A, prec):
    M, N = np.array(A, ndmin=2).shape
    AN = np.array(A, ndmin=2)
    rows = M
    cols = N
    vals = []
    for i in range(rows):
        for j in range(cols):
            vi = mv(mp.fabs(AN[i][j]), prec)**mv(2, prec)
            vals.append(vi)
    vf = mv(mp.fsum(vals), prec)**mv((1 / 2), prec)
    return vf
def logpmf(k, ntotal, ngood, untilnbad):
    """
    Logarithm of the prob. mass function of the negative hypergeometric distr.
    """
    _validate(ntotal, ngood, untilnbad)

    if k < 0 or k > ngood:
        return mpmath.mp.ninf

    with mpmath.extradps(5):
        t1 = logbinomial(k + untilnbad - 1, k)
        t2 = logbinomial(ntotal - untilnbad - k, ngood - k)
        t3 = logbinomial(ntotal, ngood)
        return mpmath.fsum([t1, t2, -t3])
def isOrderKSmithNumber(n, k):
    if isPrime(n) or n < 2:
        return 0

    digitList1 = getDigitList(n, dropZeroes=True)
    digitList2 = [item for sublist in
                  [getDigitList(m, dropZeroes=True) for m in getFactors(n)]
                  for item in sublist]

    if sorted(digitList1) == sorted(digitList2):
        return 0

    sum1 = fsum([power(i, k) for i in getDigitList(n)])
    sum2 = fsum([power(j, k) for j in
                 [item for sublist in
                  [getDigitList(m) for m in getFactors(n)]
                  for item in sublist]])

    return 1 if sum1 == sum2 else 0
def __init__(self, clusterer, k):
    distance = clusterer._distance_lambda()
    n = clusterer._n

    self._n = n
    self._k = k
    self._distance = distance

    self._reset_random_swap()
    self._selected = _agoras(n, k, distance)
    self._selected.sort()
    self._clusters = [None] * n
    self._cost = fsum(self._update_clusters(self._unselected))
    self._debug = clusterer.debug
def __init__(self, intervals):
    """
    Create a new truncated distribution object

    This method is abstract: it has to be overridden

    Parameters
    ----------
    intervals : [(float, float)]
        The intervals the distribution is truncated to
    """
    self.intervals = intervals

    dps = 15
    not_precise = True
    while not_precise:
        dps *= 2
        Q = [self._cdf_notTruncated(a, b, dps) for a, b in intervals]
        not_precise = (fsum(Q) == 0.)

    self._sumQ = fsum(Q)
    self._dps = dps
    self._Q = Q
def gmean(x, weights=None):
    """
    Geometric mean of the values in the sequence x.

    All the values in x must be nonnegative.

    If weights is not None, it must be a sequence with the same length
    as x.  The sum of weights must not be zero.
    """
    if any(t < 0 for t in x):
        raise ValueError("all values in x must be nonnegative.")
    with mpmath.extraprec(16):
        if weights is None:
            if 0 in x:
                return mpmath.mp.zero
            return mpmath.exp(mean([mpmath.log(t) for t in x]))
        else:
            # Weighted geometric mean
            wsum = mpmath.fsum(weights)
            if wsum == 0:
                raise ValueError('sum of weights must not be 0.')
            wlogxsum = mpmath.fsum([_xlogy(wi, xi)
                                    for (xi, wi) in zip(x, weights)])
            return mpmath.exp(wlogxsum / wsum)
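# Self-contained sketch (illustration only) of the weighted geometric
# mean identity the function above relies on:
# exp( sum(w_i * log(x_i)) / sum(w_i) ). Plain mpmath; the helper names
# above (mean, _xlogy) are not needed here.
import mpmath

x = [2, 8]
w = [1, 3]
wlogsum = mpmath.fsum(wi * mpmath.log(xi) for xi, wi in zip(x, w))
print(mpmath.exp(wlogsum / mpmath.fsum(w)))  # (2**1 * 8**3)**(1/4) ~= 5.6569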
def logbeta(x, y):
    """
    Natural logarithm of beta(x, y).

    The beta function is

                     Gamma(x) Gamma(y)
        beta(x, y) = -----------------
                       Gamma(x + y)

    where Gamma(z) is the Gamma function.
    """
    with mpmath.extradps(5):
        return (mpmath.loggamma(x)
                + mpmath.loggamma(y)
                - mpmath.loggamma(mpmath.fsum([x, y])))
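# Sanity-check sketch for logbeta (illustration only): compare against
# mpmath.beta directly, for moderate arguments where beta(x, y) itself
# does not underflow or overflow.
import mpmath

x, y = mpmath.mpf(3), mpmath.mpf(5)
print(mpmath.log(mpmath.beta(x, y)))  # log(1/105) ~= -4.6540
print(mpmath.loggamma(x) + mpmath.loggamma(y) - mpmath.loggamma(x + y))  # same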
def __init__(self, intervals):
    """
    Create a new truncated distribution object

    This method is abstract: it has to be overridden

    Parameters
    ----------
    intervals : [(float, float)]
        The intervals the distribution is truncated to
    """
    self.intervals = intervals

    dps = 15
    not_precise = True
    while not_precise:
        Q = [self._cdf_notTruncated(a, b, dps) for a, b in intervals]
        dps *= 2
        not_precise = (fsum(Q) == 0.)

    self._sumQ = fsum(Q)
    self._dps = dps
    self._Q = Q
def getNthPadovanNumber( arg ):
    n = fadd( real( arg ), 4 )

    a = root( fsub( fdiv( 27, 2 ), fdiv( fmul( 3, sqrt( 69 ) ), 2 ) ), 3 )
    b = root( fdiv( fadd( 9, sqrt( 69 ) ), 2 ), 3 )
    c = fadd( 1, fmul( mpc( 0, 1 ), sqrt( 3 ) ) )
    d = fsub( 1, fmul( mpc( 0, 1 ), sqrt( 3 ) ) )
    e = power( 3, fdiv( 2, 3 ) )

    r = fadd( fdiv( a, 3 ), fdiv( b, e ) )
    s = fsub( fmul( fdiv( d, -6 ), a ), fdiv( fmul( c, b ), fmul( 2, e ) ) )
    t = fsub( fmul( fdiv( c, -6 ), a ), fdiv( fmul( d, b ), fmul( 2, e ) ) )

    return nint( re( fsum( [ fdiv( power( r, n ), fadd( fmul( 2, r ), 3 ) ),
                             fdiv( power( s, n ), fadd( fmul( 2, s ), 3 ) ),
                             fdiv( power( t, n ), fadd( fmul( 2, t ), 3 ) ) ] ) ) )
def test_Matrix01(self):
    a = mpmath.mpc(mpmath.pi, "0")
    b = mpmath.mpc("0", "1")
    m = mpArray.mpArray([[a, b], [b, a]])
    mat = mpArray.mpMatrix(m)
    evals, evecs = mat.eigen(False, 'qeispack', verbose=True)
    self.assertTrue(mpmath.fabs(mpmath.mpc(mpmath.pi, "1") - evals[0]) < 1e-32)
    self.assertTrue(mpmath.fabs(mpmath.mpc(mpmath.pi, "-1") - evals[1]) < 1e-32)
    self.assertTrue(numpy.all([mpmath.fabs(mpmath.mpf(1)/mpmath.sqrt("2") - mpmath.fabs(x)) < 1e-32
                               for x in evecs[0]]))
    self.assertTrue(numpy.all([mpmath.fabs(mpmath.mpf(1)/mpmath.sqrt("2") - mpmath.fabs(x)) < 1e-32
                               for x in evecs[1]]))
    self.assertTrue(mpmath.fabs(mpmath.fsum(evecs[0]*evecs[0].conj())) - mpmath.mpf(1) < 1e-32)
    self.assertTrue(numpy.all([vec.abs2() - mpmath.mpf(1) < 1e-32 for vec in evecs]))
    self.assertTrue(mpmath.fabs(evecs[0].inner(evecs[1])) < 1e-32)
def I_graph(m, μ, σ):
    """
    Return two arrays x, y representing the graph of I
    (with mean value m, mean log_pgen μ, var log_pgen σ)
    """
    rg = np.linspace(0, 3 * m * μ, 1000)
    S = [(k, scipy.stats.poisson.pmf(k, m))
         for k in np.arange(1, max(int(3 * m), 200))]
    Is = [float(mpmath.fsum(1 / mpmath.sqrt(2 * np.pi * σ**2 * k)
                            * mpmath.exp(-m) * mpmath.power(m, k) / mpmath.factorial(k)
                            * mpmath.exp(-(x - μ * k)**2 / (2 * σ**2 * k))
                            for k, s in S if s > 1e-6))
          + (scipy.stats.poisson.pmf(0, m) if x == 0. else 0)
          for x in rg]
    return np.array(rg), np.array(Is)
def nll_hess(x, k, theta):
    """
    Gamma distribution hessian of the negative log-likelihood function.
    """
    _validate_k_theta(k, theta)
    k = mpmath.mpf(k)
    theta = mpmath.mpf(theta)
    N = len(x)
    sumx = mpmath.fsum(x)
    # sumlnx = mpmath.fsum(mpmath.log(t) for t in x)

    dk2 = -N*mpmath.psi(1, k)
    dkdtheta = -N/theta
    dtheta2 = -2*sumx/theta**3 + N*k/theta**2

    return mpmath.matrix([[-dk2, -dkdtheta], [-dkdtheta, -dtheta2]])
def getSum( n ):
    if isinstance( n, RPNGenerator ):
        return getSum( list( n ) )
    elif isinstance( n[ 0 ], ( list, RPNGenerator ) ):
        return [ getSum( arg ) for arg in n ]

    result = None

    try:
        result = fsum( n )
    except TypeError:
        result = n[ 0 ]

        for i in n[ 1 : ]:
            result = add( result, i )

    return result
def calculateArithmeticMean( args ):
    if isinstance( args, RPNGenerator ):
        total = 0
        count = 0

        for i in args:
            total += i
            count += 1

        return fdiv( total, count )
    elif isinstance( args, list ):
        if isinstance( args[ 0 ], ( list, RPNGenerator ) ):
            return [ calculateArithmeticMean( list( arg ) ) for arg in args ]
        else:
            return fdiv( fsum( args ), len( args ) )
    else:
        return args
def getSum( n ):
    if isinstance( n, RPNGenerator ):
        return getSum( list( n ) )

    if isinstance( n[ 0 ], ( list, RPNGenerator ) ):
        return [ getSum( arg ) for arg in n ]

    result = None

    try:
        result = fsum( n )
    except TypeError:
        result = n[ 0 ]

        for i in n[ 1 : ]:
            result = add( result, i )

    return result
def boxcox_llf(lmb, x):
    lmb = mpmath.mpf(lmb)
    x = [mpmath.mpf(t) for t in x]
    n = len(x)
    logdata = [mpmath.log(t) for t in x]
    sumlogdata = mpmath.fsum(logdata)

    # Compute the variance of the transformed data.
    if lmb == 0:
        variance = var(logdata)
    else:
        # Transform without the constant offset 1/lmb.  The offset does
        # not affect the variance, and the subtraction of the offset can
        # lead to loss of precision.
        variance = var([t**lmb / lmb for t in x])

    return (lmb - 1) * sumlogdata - n/2 * mpmath.log(variance)
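# Hedged usage sketch (illustration only): assuming the var() helper
# above computes the population variance (ddof=0), boxcox_llf should
# agree with scipy.stats.boxcox_llf for double-precision inputs; scipy
# is used here purely as a reference value.
from scipy import stats

data = [1.0, 2.0, 3.0, 4.0, 5.0]
print(stats.boxcox_llf(0.5, data))   # reference value from scipy
print(float(boxcox_llf(0.5, data)))  # should match to roughly 15 digits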
def calculateRootMeanSquare( args ):
    if isinstance( args, RPNGenerator ):
        total = 0
        count = 0

        for i in args:
            total += power( i, 2 )
            count += 1

        return sqrt( fdiv( total, count ) )
    elif isinstance( args, list ):
        if isinstance( args[ 0 ], ( list, RPNGenerator ) ):
            return [ calculateRootMeanSquare( list( arg ) ) for arg in args ]
        elif isinstance( args[ 0 ], RPNMeasurement ):
            pass   # TODO: handle measurements
        else:
            return sqrt( fdiv( fsum( args, squared=True ), len( args ) ) )
    else:
        return args
def nll_invhess(x, k, theta):
    """
    Gamma distribution inverse of the hessian of the negative log-likelihood.
    """
    _validate_k_theta(k, theta)
    k = mpmath.mpf(k)
    theta = mpmath.mpf(theta)
    N = len(x)
    sumx = mpmath.fsum(x)
    # sumlnx = mpmath.fsum(mpmath.log(t) for t in x)

    dk2 = -N*mpmath.psi(1, k)
    dkdtheta = -N/theta
    dtheta2 = -2*sumx/theta**3 + N*k/theta**2

    det = dk2*dtheta2 - dkdtheta**2

    return mpmath.matrix([[-dtheta2/det, dkdtheta/det], [dkdtheta/det, -dk2/det]])
def trapezium_rule(equation, lower_bound, upper_bound, n) -> str:
    """
    Given an equation, lower bound, upper bound and number of strips,
    returns the area under a curve by dividing the area into n trapezia
    and summing the individual areas.

    >>> trapezium_rule([1, 0, 0], 0, 2, 100)
    '2.6668'
    >>> trapezium_rule([1, -2, 0, 2], -0.5, 2, 2000.5)
    '3.56645978'
    >>> trapezium_rule([1, -4, 3, 0], 0, 1, 1000)
    '0.416665257'
    """
    # Divide the total width by the number of divisions to get the
    # width of each trapezium.
    trapezium_width = fdiv(abs(fsub(upper_bound, lower_bound)), n)

    # Tabulate the ordinates by mapping each x to f(x).
    ordinates_table = list(map(
        lambda x: func(equation, x),
        np.arange(np.float64(lower_bound), np.float64(upper_bound),
                  trapezium_width)))

    ans = fmul(
        fdiv(trapezium_width, 2),
        fadd(
            # Sum of the first and last ordinates
            fadd(ordinates_table[0], ordinates_table[-1]),
            # Sum of the remaining ordinates
            fmul(2, fsum(ordinates_table[1:-1]))))

    return str(abs(ans))
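# Worked check of the first doctest above (illustration only), assuming
# func(equation, x) evaluates the polynomial with coefficients
# `equation` at x, so [1, 0, 0] is f(x) = x**2. The exact integral of
# x**2 over [0, 2] is 8/3 ~= 2.66667; the trapezium rule overestimates
# a convex integrand, hence '2.6668' with 100 strips.
import math

def trapezium(f, a, b, n):
    h = (b - a) / n
    inner = math.fsum(f(a + i * h) for i in range(1, n))
    return h / 2 * (f(a) + f(b) + 2 * inner)

print(trapezium(lambda x: x * x, 0.0, 2.0, 100))  # 2.6668000...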
def calculateHeatIndexOperator( measurement1, measurement2 ):
    '''
    https://en.wikipedia.org/wiki/Heat_index#Formula
    '''
    # pylint: disable=invalid-name
    validUnitTypes = [
        [ 'temperature', 'constant' ],
    ]

    arguments = matchUnitTypes( [ measurement1, measurement2 ], validUnitTypes )

    if not arguments:
        raise ValueError( '\'heat_index\' requires a temperature measurement and '
                          'the relative humidity in percent' )

    T = arguments[ 'temperature' ].convert( 'degrees_F' ).value
    R = arguments[ 'constant' ]

    if T < 80:
        raise ValueError( '\'heat_index\' is not defined for temperatures less than 80 degrees fahrenheit' )

    if R < 0.4 or R > 1.0:
        raise ValueError( '\'heat_index\' requires a relative humidity value ranging from 40% to 100%' )

    R = fmul( R, 100 )

    c1 = -42.379
    c2 = 2.04901523
    c3 = 10.14333127
    c4 = -0.22475541
    c5 = -6.83783e-3
    c6 = -5.481717e-2
    c7 = 1.22874e-3
    c8 = 8.5282e-4
    c9 = -1.99e-6

    heatIndex = fsum( [ c1, fmul( c2, T ), fmul( c3, R ), fprod( [ c4, T, R ] ),
                        fprod( [ c5, T, T ] ), fprod( [ c6, R, R ] ),
                        fprod( [ c7, T, T, R ] ), fprod( [ c8, T, R, R ] ),
                        fprod( [ c9, T, T, R, R ] ) ] )

    return RPNMeasurement( heatIndex, 'fahrenheit' ).convert( arguments[ 'temperature' ].units )
def asymptotic_expansion(self, omega):
    # Evaluate the modulus of the characteristic function
    domain = np.linspace(0, 5, 500)
    char_fn = list(map(lambda t: mp.fabs(self.char_fn(t)), domain))

    # threshold 1e-50
    thresh_check = [domain[i] for i in range(500)
                    if char_fn[i] < mp.mpf(1e-50)]

    # Need to extend the domain
    if len(list(thresh_check)) == 0:
        j = 1
        while len(list(thresh_check)) == 0:
            domain = np.linspace(5 * j, 5 * (j + 1), 500)
            char_fn = list(map(lambda t: mp.fabs(self.char_fn(t)), domain))
            thresh_check = [domain[i] for i in range(500)
                            if char_fn[i] < mp.mpf(1e-50)]
            j += 1

    cutoff = thresh_check[0]

    # Generate the derivatives for the asymptotic expansion
    order = 6
    if not hasattr(self, 'diffs'):
        self.gen_diffs(order)

    # Evaluate the expansion
    asym_series = mp.matrix(order, 1)
    for i in range(1, order + 1):
        asym_series[i - 1] = self.series_term(i, omega, cutoff)

    # Sum up and take the real part. We do not multiply by (-1)
    # because the fact that we have g(x) = -x cancels this.
    return mp.re(mp.fsum(asym_series))
def logpmf(k, ntotal, ngood, nsample):
    """
    Logarithm of the PMF of the hypergeometric distribution.

    `logpmf` computes the natural logarithm of the probability mass
    function of the hypergeometric distribution.
    """
    _validate(ntotal, ngood, nsample)
    nbad = ntotal - ngood
    with mpmath.extradps(5):
        # numerator terms
        terms = [mpmath.log(ntotal + 1),
                 logbeta(ntotal - nsample + 1, nsample + 1)]
        # denominator terms
        terms.extend([-mpmath.log(ngood + 1),
                      -mpmath.log(nbad + 1),
                      -logbeta(k + 1, ngood - k + 1),
                      -logbeta(nsample - k + 1, nbad - nsample + k + 1)])
        return mpmath.fsum(terms)
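# Hedged cross-check (illustration only): the beta-function form above
# should equal the direct combinatorial form
# log( C(ngood, k) * C(nbad, nsample - k) / C(ntotal, nsample) ),
# via the identity C(n, k) = 1 / ((n + 1) * B(n - k + 1, k + 1)).
import mpmath

def logpmf_direct(k, ntotal, ngood, nsample):
    nbad = ntotal - ngood
    return (mpmath.log(mpmath.binomial(ngood, k))
            + mpmath.log(mpmath.binomial(nbad, nsample - k))
            - mpmath.log(mpmath.binomial(ntotal, nsample)))

print(logpmf_direct(2, 20, 7, 12))  # compare against logpmf(2, 20, 7, 12)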
def calculateRootMeanSquare( args ):
    if isinstance( args, RPNGenerator ):
        total = 0
        count = 0

        for i in args:
            total += power( i, 2 )
            count += 1

        return sqrt( fdiv( total, count ) )

    if isinstance( args, list ):
        if isinstance( args[ 0 ], ( list, RPNGenerator ) ):
            return [ calculateRootMeanSquare( list( arg ) ) for arg in args ]

        if isinstance( args[ 0 ], RPNMeasurement ):
            # TODO: handle measurements
            raise ValueError( '\'root_mean_square\' doesn\'t support measurements' )

        return sqrt( fdiv( fsum( args, squared=True ), len( args ) ) )

    return args
def calculateArithmeticMean( args ):
    if isinstance( args, RPNGenerator ):
        total = 0
        count = 0

        for i in args:
            total += i
            count += 1

        return fdiv( total, count )

    if isinstance( args, list ):
        if isinstance( args[ 0 ], ( list, RPNGenerator ) ):
            return [ calculateArithmeticMean( list( arg ) ) for arg in args ]

        if isinstance( args[ 0 ], RPNMeasurement ):
            # TODO: handle measurements
            raise ValueError( '\'mean\' doesn\'t support measurements (yet)' )

        return fdiv( fsum( args ), len( args ) )

    return args
def getInvertedBits( n ):
    value = real_int( n )

    # determine how many groups of bits we will be looking at
    if value == 0:
        groupings = 1
    else:
        groupings = int( fadd( floor( fdiv( ( log( value, 2 ) ), g.bitwiseGroupSize ) ), 1 ) )

    placeValue = mpmathify( 1 << g.bitwiseGroupSize )
    multiplier = mpmathify( 1 )
    remaining = value

    result = mpmathify( 0 )

    for i in range( 0, groupings ):
        result = fadd( fmul( fsum( [ placeValue,
                                     fneg( fmod( remaining, placeValue ) ), -1 ] ),
                             multiplier ), result )

        remaining = floor( fdiv( remaining, placeValue ) )
        multiplier = fmul( multiplier, placeValue )

    return result
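# Plain-integer sketch of the same grouped bitwise NOT (illustration
# only; the group size is fixed to 16 here, standing in for
# g.bitwiseGroupSize): each group of bits is replaced by
# (2**group_size - 1) - group.
def inverted_bits(value, group_size=16):
    place = 1 << group_size
    result = 0
    multiplier = 1
    remaining = value
    groups = max(1, (value.bit_length() + group_size - 1) // group_size)
    for _ in range(groups):
        result += (place - 1 - (remaining % place)) * multiplier
        remaining //= place
        multiplier *= place
    return result

print(hex(inverted_bits(0x0f0f)))  # 0xf0f0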
def calculateHeatIndex( measurement1, measurement2 ):
    validUnitTypes = [
        [ 'temperature', 'constant' ],
    ]

    arguments = matchUnitTypes( [ measurement1, measurement2 ], validUnitTypes )

    if not arguments:
        raise ValueError( '\'heat_index\' requires a temperature measurement and '
                          'the relative humidity in percent' )

    T = arguments[ 'temperature' ].convert( 'degrees_F' ).value
    R = arguments[ 'constant' ]

    if T < 80:
        raise ValueError( '\'heat_index\' is not defined for temperatures less than 80 degrees fahrenheit' )

    if R < 0.4 or R > 1.0:
        raise ValueError( '\'heat_index\' requires a relative humidity value ranging from 40% to 100%' )

    R = fmul( R, 100 )

    c1 = -42.379
    c2 = 2.04901523
    c3 = 10.14333127
    c4 = -0.22475541
    c5 = -6.83783e-3
    c6 = -5.481717e-2
    c7 = 1.22874e-3
    c8 = 8.5282e-4
    c9 = -1.99e-6

    heatIndex = fsum( [ c1, fmul( c2, T ), fmul( c3, R ), fprod( [ c4, T, R ] ),
                        fprod( [ c5, T, T ] ), fprod( [ c6, R, R ] ),
                        fprod( [ c7, T, T, R ] ), fprod( [ c8, T, R, R ] ),
                        fprod( [ c9, T, T, R, R ] ) ] )

    return RPNMeasurement( heatIndex, 'fahrenheit' ).convert( arguments[ 'temperature' ].units )
def _mle_scale_func(scale, x, xbar):
    emx = [mpmath.exp(-xi/scale) for xi in x]
    s1 = mpmath.fsum([xi * emxi for xi, emxi in zip(x, emx)])
    s2 = mpmath.fsum(emx)
    return s2*(xbar - scale) - s1
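# Hedged usage sketch (illustration only): _mle_scale_func is a score
# equation for a scale parameter (it matches the Gumbel/extreme-value
# form scale = xbar - sum(x*exp(-x/s)) / sum(exp(-x/s)) = 0 after
# rearranging), so its root can be located numerically, e.g. with
# mpmath.findroot started at xbar. Convergence from xbar is typical
# for well-behaved data but not guaranteed in general.
import mpmath

x = [mpmath.mpf(t) for t in [0.5, 1.0, 2.0, 4.0]]
xbar = mpmath.fsum(x) / len(x)
scale_hat = mpmath.findroot(lambda s: _mle_scale_func(s, x, xbar), xbar)
print(scale_hat)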
def swap(self):
    try:
        i, h = self._next_random_swap()
    except StopIteration:
        self._debug('all swaps tried')
        return False

    self._debug('eval swap', i, h)

    # try to swap medoid i with non-medoid h
    clusters = self._clusters
    distance = self._distance

    # we differentiate fast_updates and slow_updates. fast_updates
    # need to update only the second-nearest medoid. slow_updates
    # need to update the nearest and second-nearest medoid.
    fast_updates = []

    def calculate_t():
        # i attaches to a medoid
        yield min(distance(i, j)
                  for j in chain(self._selected, [h]) if j != i)

        # h detaches from its medoid
        yield -distance(h, clusters[h][0])

        # look at all other points
        for j in self._unselected:
            if j == h:
                continue

            # see [Ng1994] for a description of the following calculations
            n1, n2 = clusters[j]  # nearest two medoids
            dh = distance(j, h)   # d(Oj, Oh)

            if n1 == i:  # is j in cluster i?
                d2 = distance(j, n2)  # d(Oj, Oj,2)
                if dh >= d2:  # case (1); j does not go to h
                    yield d2 - distance(j, i)
                    fast_updates.append((j, n2))
                else:  # case (2); j goes to h
                    yield dh - distance(j, i)
                    fast_updates.append((j, h))
            else:
                k = clusters[j][0]
                d2 = distance(j, k)
                if dh >= d2:  # case (3)
                    # j stays in current cluster. second nearest medoid
                    # to j might change with the introduction of h though.
                    fast_updates.append((j, k))
                else:  # case (4)
                    yield dh - d2
                    fast_updates.append((j, h))

    t = fsum(calculate_t())

    if t < 0:  # swap is an improvement?
        self._debug('ACCEPT swap t:%f' % t, i, h)
        self._cost += t

        selected = self._selected
        del selected[selected.index(i)]
        bisect.insort(selected, h)

        slow_updates = [i]  # update i's distances to medoids
        for j, m in fast_updates:
            if m == i:
                slow_updates.append(j)
            else:
                min_d = None
                min_k = None
                for k in selected:
                    if k != m:
                        d = distance(k, j)
                        if min_d is None or d < min_d:
                            min_d = d
                            min_k = k
                assert m != min_k and min_k is not None
                clusters[j] = (m, min_k)

        # update other second distances.
        for _ in self._update_clusters(slow_updates):
            pass  # ignore distances (no influence on cost here).

        # we swapped, so we want to allow previously used partners.
        self._reset_random_swap()

        return True
    else:
        return False
def eqn8(N, B):
    sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
    return 1. / (exp(-B) * fsum(sumterms))
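# Minimal numeric check (illustration only), assuming power, exp,
# factorial and fsum come from mpmath: the denominator in eqn8 is the
# Poisson CDF P(X <= N) for mean B, so eqn8 returns its reciprocal.
from mpmath import power, exp, factorial, fsum

def eqn8_check(N, B):
    cdf = exp(-B) * fsum([power(B, n) / factorial(n) for n in range(int(N) + 1)])
    return 1. / cdf

print(eqn8_check(5, 0))    # exactly 1.0: the sum collapses to its first term
print(eqn8_check(5, 2.5))  # ~1.0439, the reciprocal of P(X <= 5) for mean 2.5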
def fma(m, n, a):
    list1 = []
    for b in range(m + 1):
        list1.append((a**(m * n + 1 + b) * (1 - a)**(m - b))
                     / factorial(m * n + b + 1))
    return An(n)**m * factorial(m * n) * fsum(list1)
def apply(self, items, evaluation):
    'Plus[items___]'

    items = items.numerify(evaluation).get_sequence()
    leaves = []
    last_item = last_count = None

    prec = min_prec(*items)
    is_machine_precision = any(item.is_machine_precision() for item in items)
    numbers = []

    def append_last():
        if last_item is not None:
            if last_count == 1:
                leaves.append(last_item)
            else:
                if last_item.has_form('Times', None):
                    last_item.leaves.insert(0, from_sympy(last_count))
                    leaves.append(last_item)
                else:
                    leaves.append(Expression(
                        'Times', from_sympy(last_count), last_item))

    for item in items:
        if isinstance(item, Number):
            numbers.append(item)
        else:
            count = rest = None
            if item.has_form('Times', None):
                for leaf in item.leaves:
                    if isinstance(leaf, Number):
                        count = leaf.to_sympy()
                        rest = item.leaves[:]
                        rest.remove(leaf)
                        if len(rest) == 1:
                            rest = rest[0]
                        else:
                            rest.sort()
                            rest = Expression('Times', *rest)
                        break
            if count is None:
                count = sympy.Integer(1)
                rest = item
            if last_item is not None and last_item == rest:
                last_count = last_count + count
            else:
                append_last()
                last_item = rest
                last_count = count
    append_last()

    if numbers:
        if prec is not None:
            if is_machine_precision:
                numbers = [item.to_mpmath() for item in numbers]
                number = mpmath.fsum(numbers)
                number = Number.from_mpmath(number)
            else:
                with mpmath.workprec(prec):
                    numbers = [item.to_mpmath() for item in numbers]
                    number = mpmath.fsum(numbers)
                    number = Number.from_mpmath(number, dps(prec))
        else:
            number = from_sympy(sum(item.to_sympy() for item in numbers))
    else:
        number = Integer(0)

    if not number.same(Integer(0)):
        leaves.insert(0, number)

    if not leaves:
        return Integer(0)
    elif len(leaves) == 1:
        return leaves[0]
    else:
        leaves.sort()
        return Expression('Plus', *leaves)
def solveQuarticPolynomialOperator( _a, _b, _c, _d, _e ):
    # pylint: disable=invalid-name
    '''
    This function applies the quartic formula to solve a polynomial
    with coefficients of a, b, c, d, and e.
    '''
    if mp.dps < 50:
        mp.dps = 50

    # maybe it's really an order-3 polynomial
    if _a == 0:
        return solveCubicPolynomial( _b, _c, _d, _e )

    # degenerate case, just return the two real and two imaginary 4th roots of the
    # constant term divided by the 4th root of a
    if _b == 0 and _c == 0 and _d == 0:
        e = fdiv( _e, _a )

        f = root( _a, 4 )

        x1 = fdiv( root( fneg( e ), 4 ), f )
        x2 = fdiv( fneg( root( fneg( e ), 4 ) ), f )
        x3 = fdiv( mpc( 0, root( fneg( e ), 4 ) ), f )
        x4 = fdiv( mpc( 0, fneg( root( fneg( e ), 4 ) ) ), f )

        return [ x1, x2, x3, x4 ]

    # otherwise we have a regular quartic to solve
    b = fdiv( _b, _a )
    c = fdiv( _c, _a )
    d = fdiv( _d, _a )
    e = fdiv( _e, _a )

    # we turn the equation into a cubic that we can solve
    f = fsub( c, fdiv( fmul( 3, power( b, 2 ) ), 8 ) )
    g = fsum( [ d, fdiv( power( b, 3 ), 8 ), fneg( fdiv( fmul( b, c ), 2 ) ) ] )
    h = fsum( [ e, fneg( fdiv( fmul( 3, power( b, 4 ) ), 256 ) ),
                fmul( power( b, 2 ), fdiv( c, 16 ) ), fneg( fdiv( fmul( b, d ), 4 ) ) ] )

    roots = solveCubicPolynomial( 1, fdiv( f, 2 ),
                                  fdiv( fsub( power( f, 2 ), fmul( 4, h ) ), 16 ),
                                  fneg( fdiv( power( g, 2 ), 64 ) ) )
    y1 = roots[ 0 ]
    y2 = roots[ 1 ]
    y3 = roots[ 2 ]

    # pick two non-zero roots, if there are two imaginary roots, use them
    if y1 == 0:
        root1 = y2
        root2 = y3
    elif y2 == 0:
        root1 = y1
        root2 = y3
    elif y3 == 0:
        root1 = y1
        root2 = y2
    elif im( y1 ) != 0:
        root1 = y1

        if im( y2 ) != 0:
            root2 = y2
        else:
            root2 = y3
    else:
        root1 = y2
        root2 = y3

    # more variables...
    p = sqrt( root1 )
    q = sqrt( root2 )
    r = fdiv( fneg( g ), fprod( [ 8, p, q ] ) )
    s = fneg( fdiv( b, 4 ) )

    # put together the 4 roots
    x1 = fsum( [ p, q, r, s ] )
    x2 = fsum( [ p, fneg( q ), fneg( r ), s ] )
    x3 = fsum( [ fneg( p ), q, fneg( r ), s ] )
    x4 = fsum( [ fneg( p ), fneg( q ), r, s ] )

    return [ chop( x1 ), chop( x2 ), chop( x3 ), chop( x4 ) ]
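# Hedged spot check (illustration only): compare the quartic solver
# against numpy.roots on a polynomial with known roots,
# (x-1)(x-2)(x-3)(x-4) = x^4 - 10x^3 + 35x^2 - 50x + 24.
import numpy as np

print(sorted(np.roots([1, -10, 35, -50, 24]).real))
# approximately [1.0, 2.0, 3.0, 4.0];
# solveQuarticPolynomialOperator( 1, -10, 35, -50, 24 ) should return
# the same four roots as (chopped) mpmath values.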
def _kmeans(self, k):
    # implements an improved version of kmeans, see [Hamerly2010].
    x = self.x
    d = self.d
    assert k <= len(x)

    # pick initial clusters. actually quite an important step.
    c = list(islice(self._pick_initial(), 0, k))
    assert len(c) == k

    q = [0] * len(c)
    cc = [[0] * len(x[0]) for _ in range(k)]
    a = [0] * len(x)
    u = [0] * len(x)
    l = [0] * len(x)

    for i, xi in enumerate(x):
        ai, u[i], _, l[i] = _smallest2(d(xi, cj) for cj in c)
        q[ai] += 1
        cc[ai] = _pairwise_sum(cc[ai], xi)
        a[i] = ai

    s = [0] * len(c)
    p = [0] * len(c)

    change = None
    while change is None or change > self.epsilon:
        # find cluster distances
        cd = self._distances(c)
        s = [min(cd(j1, j2) for j2 in range(k) if j2 != j1)
             for j1 in range(k)]

        # find new assignments
        for i, ai in enumerate(a):
            m = max(s[ai] / 2., l[i])
            if u[i] > m:
                xi = x[i]
                u[i] = d(xi, c[ai])
                if u[i] > m:
                    new_ai, u[i], _, l[i] = _smallest2(
                        d(xi, cj) for cj in c)
                    if new_ai != ai:
                        q[ai] -= 1
                        q[new_ai] += 1
                        cc[ai] = [a - b for a, b in zip(cc[ai], xi)]
                        cc[new_ai] = [
                            a + b for a, b in zip(cc[new_ai], xi)]
                        a[i] = new_ai

        # move centers
        empty_cluster = False
        for j in range(len(c)):
            qj = q[j]
            if qj == 0:
                empty_cluster = True
                break
            c_old = c[j]
            c_new = [ccj / qj for ccj in cc[j]]
            distance = d(c_old, c_new)
            c[j] = c_new
            p[j] = distance

        if empty_cluster:
            break

        # update bounds
        r1, p1, r2, p2 = _largest2(p)
        for i, ai in enumerate(a):
            u[i] += p[ai]
            if r1 == ai:
                l[i] -= p2
            else:
                l[i] -= p1

        change = sum(p)

    # compute an approximate silhouette index
    within = [0] * len(c)
    for i, ai in enumerate(a):
        within[ai] += d(x[i], c[ai])
    for j in range(len(c)):
        if q[j] == 1:
            return a, -1.  # no good config
        within[j] /= q[j] - 1

    silhouette = fsum(
        _silhouette(a, b) for a, b in zip(within, s)) / len(c)

    return a, silhouette