Example #1
def get_pfix_transformed(p0, x4Ns, h):
    """
    Try to get the same result as the function in the kimura module.
    Change of variables according to eqn 3 of Chen et al.
    When I type
    (integral from 0 to p of exp(b*s*(x-a)**2) ) /
    (integral from 0 to 1 of exp(b*s*(x-a)**2) )
    I get
    ( erfi(sqrt(b)*sqrt(s)*a) - erfi(sqrt(b)*sqrt(s)*(a-p)) ) /
    ( erfi(sqrt(b)*sqrt(s)*a) - erfi(sqrt(b)*sqrt(s)*(a-1)) )
    @param p0: proportion of mutant alleles in the population
    @param x4Ns: 4Ns
    @param h: dominance parameter
    @return: fixation probability
    """
    if not x4Ns:
        # This is the neutral case.
        return p0
    if h == 0.5:
        # This is the genic case.
        # Checking for exact equality of 0.5 is OK.
        return math.expm1(-x4Ns*p0) / math.expm1(-x4Ns)
    b = 2.0 * h - 1.0
    a = h / (2.0 * h - 1.0)
    q = cmath.sqrt(b) * cmath.sqrt(x4Ns)
    #
    top = kimura.erfi(q*a) - kimura.erfi(q*(a-p0))
    bot = kimura.erfi(q*a) - kimura.erfi(q*(a-1))
    return top / bot
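A minimal numeric sanity check of the erfi ratio above, substituting scipy.special.erfi for the kimura module's erfi (an assumption about that helper) and picking arbitrary overdominant parameters:

import math
from scipy import special

def pfix_erfi_ratio(p0, x4Ns, h):
    # same change of variables: b = 2h - 1, a = h/(2h - 1), q = sqrt(b * x4Ns)
    b = 2.0 * h - 1.0
    a = h / b
    q = math.sqrt(b * x4Ns)  # real when b * x4Ns > 0
    top = special.erfi(q * a) - special.erfi(q * (a - p0))
    bot = special.erfi(q * a) - special.erfi(q * (a - 1.0))
    return top / bot

print(pfix_erfi_ratio(0.1, 2.0, 0.7))  # a fixation probability strictly between 0 and 1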
Example #2
def transition(dist, a, f, logspace=0):
    """
    Compute transition probabilities for an HMM.

    To compute transition probabilities between hidden states
    when moving from time t to t+1,
    the genetic distance (cM) between the two markers is required.

    Assumes known parameters a and f.
    If logspace = 1, calculations are log-transformed.
    
    Key in dictionary: 0 = not-IBD, 1 = IBD.
    """
    if logspace == 0:
        qk = exp(-a*dist)

        T = { # 0 = not-IBD, 1 = IBD
            1: {1: (1-qk)*f + qk, 0: (1-qk)*(1-f)},
            0: {1: (1-qk)*f, 0: (1-qk)*(1-f) + qk}
            }
        
    else:
        if dist == 0:
            dist = 1e-06

        ff = 1-f
        ad = a*dist
        A = expm1(ad)
        AA = -expm1(-ad)
        T = { # 0 = not-IBD, 1 = IBD
            1: {1: log1p(A*f)-ad, 0: log(AA*ff)},
            0: {1: log(AA*f), 0: log1p(A*ff)-ad}}
    return T
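Each row of T sums to one in the non-log branch, since (1-qk)*f + qk + (1-qk)*(1-f) = qk + (1-qk) = 1. A quick check, assuming the function above is in scope with exp imported from math:

T = transition(dist=0.5, a=0.1, f=0.01)
for state, row in T.items():
    assert abs(sum(row.values()) - 1.0) < 1e-12  # each row is a probability distribution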
Example #3
def get_fixation_probability_chen(p, s, h):
    """
    This uses the parameter conventions from Christina Chen et al. 2008.
    @param p: initial allele frequency
    @param s: positive when the mutant allele is fitter
    @param h: dominance
    @return: fixation probability
    """
    s_eff = 2.0 * s
    beta = 2.0 * h - 1.0
    if not s_eff:
        return p
    if not beta:
        return math.expm1(-s_eff*p) / math.expm1(-s_eff)
    alpha = h / beta
    if beta * s_eff > 0:
        # overdominant if 0 < alpha < 1
        f = erfi
    elif beta * s_eff < 0:
        # underdominant if 0 < alpha < 1
        f = special.erf
    else:
        raise ValueError
    L = math.sqrt(abs(beta * s_eff))
    a0 = f(L*(0 - alpha))
    a1 = f(L*(p - alpha))
    b0 = f(L*(0 - alpha))
    b1 = f(L*(1 - alpha))
    pfix = (a1 - a0) / (b1 - b0)
    return pfix
Example #4
def f_fix(x, N_diploid, s):
    """
    The limits at s=0 and x=0 and x=1 are not implemented yet.
    Ultimately this function will be used as a term of an integrand
    whose integral will possibly be a well known special function.
    """
    return math.expm1(-4*N_diploid*s*x) / math.expm1(-4*N_diploid*s)
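The missing s = 0 limit equals x, since both expm1 arguments vanish linearly in s; a guarded variant in the style of kimura_sojourn_helper (Example #7) could read:

import math

def f_fix_safe(x, N_diploid, s):
    # as s -> 0, expm1(-4*N*s*x) / expm1(-4*N*s) -> x
    if not s:
        return x
    return math.expm1(-4 * N_diploid * s * x) / math.expm1(-4 * N_diploid * s)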
Example #5
def J2_integrand(x, S):
    """
    This is part of equation (17) of Kimura and Ohta.
    """
    a = math.expm1(2*S*x)
    b = math.expm1(-2*S*x)
    c = x / (1 - x)
    return -(a * b) / c
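Such integrands are usually handed to a quadrature routine; a sketch using scipy.integrate.quad, where the limits 0 and 1 are an assumption since equation (17) is not reproduced here:

from scipy.integrate import quad

S = 1.5  # hypothetical scaled selection coefficient
# quad does not evaluate the endpoints, where this expression is 0/0
value, abserr = quad(J2_integrand, 0.0, 1.0, args=(S,))
print(value, abserr)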
Example #6
	def SetCommand(self,linear=0,angular=0):
		# TODO: Implement exponential control and/or a thresholded control strategy here:
		if exponential_control == True:
			vel_linear  = expm1(abs(4*linear) ) / exponential_max
			vel_angular = expm1(abs(4*angular)) / exponential_max
			self.command.linear.x  = copysign(vel_linear , linear )
			self.command.angular.z = -1 * copysign(vel_angular, angular)
		else:
			self.command.linear.x  = linear
			self.command.angular.z = angular
Example #7
def kimura_sojourn_helper(a, x):
    """
    Computes (exp(ax) - 1) / (exp(a) - 1) and accepts a=0.
    @param a: a scaled selection, can take any value
    @param x: a proportion between 0 and 1
    @return: a nonnegative value
    """
    if not a:
        return x
    else:
        return math.expm1(a*x) / math.expm1(a)
Example #8
def get_pfix_approx(N_diploid, s):
    """
    This is an approximation of the fixation probability.
    It is derived by applying equation (3) in Kimura 1962,
    where p is the proportion of the preferred gene in the population,
    which in this case is 1/(2N) because it is a new mutant.
    It is given directly as equation (10).
    """
    N = N_diploid * 2
    if s:
        return math.expm1(-s) / math.expm1(-N*s)
    else:
        return 1.0 / N
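The two branches agree in the limit: as s -> 0, expm1(-s)/expm1(-N*s) -> s/(N*s) = 1/N. A quick check, assuming the function above is in scope:

print(get_pfix_approx(100, 1e-12))  # ~ 0.005, i.e. 1/(2*100)
print(get_pfix_approx(100, 0.0))    # exactly 0.005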
Example #9
def alpha_merge_eqn(x, alpha, beta, x0, opthin=False):
    """Equation we need the root for to merge power law to modified
    blackbody

    Parameters
    ----------
    x : float
      h nu / k T to evaluate at

    alpha : float
      blue side power law index

    beta : float
      Dust attenuation power law index

    x0 : float
      h nu_0 / k T

    opthin : bool
      Assume optically thin case
    """

    try:
        # This can overflow badly
        xox0beta = (x / x0)**beta
        bterm = xox0beta / math.expm1(xox0beta)
    except OverflowError:
        # If xox0beta is very large, then the bterm is zero
        bterm = 0.0
    return x - (1.0 - math.exp(-x)) * (3.0 + alpha + beta * bterm)
Example #10
    def __init__(self, T, fnorm, wavenorm=500.0):
        """Initializer

        Parameters:
        -----------
        T : float
          Temperature/(1+z) in K

        fnorm : float
          Normalization flux, in mJy

        wavenorm : float
          Wavelength of normalization flux, in microns (def: 500)
        """

        self._T = float(T)
        self._fnorm = float(fnorm)
        self._wavenorm = float(wavenorm)

        # Some constants -- eventually, replace these with
        # astropy.constants, but that is in development, so hardwire
        # for now
        self._hcokt = h * c / (k * self._T)
        self._xnorm = self._hcokt / self._wavenorm
        self._normfac = self._fnorm * math.expm1(self._xnorm) / \
            self._xnorm**3
Example #11
 def __init__(self, arms, rounds):
     self._arms = arms
     self._dist = np.ones(arms) / arms
     sim_learning_rate = min(1.0,
                             sqrt((arms * log(arms)) /
                                  (expm1(1.0) * rounds)))
     self._alpha = 3 * sim_learning_rate
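This is the usual Exp3-style exploration rate, min(1, sqrt(K*ln(K) / ((e-1)*T))), with expm1(1.0) serving as e - 1. A standalone evaluation with hypothetical arms and rounds:

from math import sqrt, log, expm1

arms, rounds = 10, 10000
print(min(1.0, sqrt((arms * log(arms)) / (expm1(1.0) * rounds))))  # ≈ 0.0366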
Example #12
def expm1(space, d):
    """ Returns exp(number) - 1, computed in a way that is
    accurate even when the value of number is close to zero"""
    try:
        return space.wrap(math.expm1(d))
    except OverflowError:
        return space.wrap(rfloat.INFINITY)
Example #13
def gen_modified_branch_history_sample(
        initial_state, final_state, blen_in, rates, P, t0=0.0):
    """
    This is a helper function for Nielsen modified rejection sampling.
    Yield (transition time, new state) pairs.
    The idea is to sample a path which may need to be rejected,
    and it is slightly clever in the sense that the path does not
    need to be rejected as often as do naive forward path samples.
    In more detail, this path sampler will generate paths
    conditional on at least one change occurring on the path,
    when appropriate.
    @param initial_state: initial state
    @param final_state: initial state
    @param blen_in: length of the branch
    @param rates: the rate away from each state
    @param P: transition matrix conditional on leaving a state
    @param t0: initial time
    """
    t = t0
    state = initial_state
    if state != final_state:
        rate = rates[initial_state]
        u = random.random()
        delta_t = -math.log1p(u*math.expm1(-blen_in*rate)) / rate
        t += delta_t
        if t >= blen_in:
            return
        distn = P[state]
        state = cmedbutil.random_category(distn)
        yield t, state
    for t, state in gen_branch_history_sample(state, blen_in, rates, P, t0=t):
        yield t, state
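The delta_t line inverts the CDF of an exponential distribution truncated to [0, blen_in]: t = -log(1 - u*(1 - e^(-rate*blen)))/rate, written with log1p and expm1 for stability. An empirical check that samples never exceed the branch length (a sketch):

import math
import random

rate, blen = 2.0, 0.5
draws = [-math.log1p(random.random() * math.expm1(-blen * rate)) / rate
         for _ in range(10000)]
assert 0.0 <= min(draws) and max(draws) <= blen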
Example #14
 def fprime(curves):
      if curves == 0:
           return work_per_curve / (2*odds_factor_exists)
      arg = curves/median_curves
      l = -expm1(-arg) # 1-exp(-arg)
      # d/dx (x/l) = (l - x*l')/l^2
      # x*l' = arg*(1-l)
      return work_per_curve / odds_factor_exists * (l+arg*(l-1)) / (l*l)
Example #15
    def calculateFalsePositiveProbabilityForNumberOfHashes(self, aNumberOfHashes):
        theInnerPower = -1.0 * (float( aNumberOfHashes ) * float(self.myNumWordsAdded)) / float(self.getBitArraySize())
        theInnerExpression = -1.0 * math.expm1(theInnerPower)

        theOuterPower = float( aNumberOfHashes )
        theOuterExpression = math.pow(theInnerExpression, theOuterPower)

        return theOuterExpression * 100.0
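The quantity computed is the standard Bloom-filter false-positive estimate, (1 - e^(-k*n/m))^k, returned as a percentage. A standalone rendering with hypothetical k, n, m:

import math

k, n, m = 7, 1000, 10000  # hash count, items added, bits in the array (hypothetical)
rate = (-math.expm1(-float(k * n) / m)) ** k * 100.0
print(rate)  # ≈ 0.82 (percent)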
Example #16
def log1mexp(a):
    """
            Computes log(1-exp(a)) according to Machler, "Accurately computing ..."
            Note: a should be a large negative value!
    """
    if a > 0: print >>sys.stderr, "# Warning, log1mexp with a=", a, " > 0"
    if a < -log(2.0): return log1p(-exp(a))
    else:             return log(-expm1(a))
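The split at -log(2) matters: for a very close to zero, log1p(-exp(a)) loses precision because exp(a) rounds to nearly 1, while log(-expm1(a)) stays accurate. A sketch:

from math import exp, expm1, log, log1p

a = -1e-10
print(log(-expm1(a)))  # ≈ -23.0259, accurate
print(log1p(-exp(a)))  # same quantity, computed through a catastrophic cancellation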
Example #17
 def __init__(self, options, rounds, highprob=False, lr=None):
     if lr == None:
         self._learning_rate = min(1.0, sqrt((options * log(options)) / (expm1(1) * rounds)))
     else:
         self._learning_rate = lr
     self._weights = np.ones(options, dtype=float)
     self.prob = self._weights / options
     self.gains = np.zeros(options)
Example #18
def J1_integrand(x, S):
    """
    This is part of equation (17) of Kimura and Ohta.
    """
    a = math.expm1(2*S*x)
    b = math.exp(-2*S*x) - math.exp(-2*S)
    c = x * (1 - x)
    return (a * b) / c
Example #19
def log1mexp_numba(a):
    if a > 0:
        print "LOGEXP"
        return

    if a < -log(2.0):
        return log1p(-exp(a))
    else:
        return log(-expm1(a))
Example #20
def ciclo_none(rtot_species, period, Nindivs, invK, L_abs):
    rcal = rtot_species
    rspneq = calc_r_periodo_vh(rcal,invperiod) if (rcal>=0) else calc_r_periodo_vh(-rcal,invperiod)
    incNmalth= np.random.binomial(Nindivs,-math.expm1(-rspneq))
    if (rcal>0):
        inc_pop = Nindivs + incNmalth 
    else:
        inc_pop = Nindivs - incNmalth
    return([inc_pop,rcal])    
Example #21
    def _dmass_calc(self, step, opz, bnu_fac, temp_fac, knu_fac, opthin, dl2):
        """Internal function to comput dustmass in 10^8 M_sun,
        given various pre-computed values"""

        msolar8 = 1.97792e41  # mass of the sun*10^8 in g
        T = step[0] * opz
        beta = step[1]
        S_nu = step[4] * 1e-26  # to erg / s-cm^2-Hz from mJy
        B_nu = bnu_fac / math.expm1(temp_fac / T)  # Planck function
        # Scale kappa with freq (obs frame ok).  Factor of 10 is
        #  m^2 kg^-1 -> cm^2 g^-1 conversion
        K_nu = 10.0 * self._kappa * knu_fac ** (-beta)
        dustmass = dl2 * S_nu / (opz * K_nu * B_nu * msolar8)
        if not opthin:
            tau_nu = (step[2] / self._wavenorm) ** beta
            op_fac = -tau_nu / math.expm1(-tau_nu)
            dustmass *= op_fac
        return dustmass
Example #22
def predators_effect(p_devorados, j, Nindividuals_p,
                     Nindividuals_c, minputchar_c, numspecies_c, n, k):
    rceff = 0.0  # ensure rceff is defined even if no predator link is found
    for j in range(numspecies_c):
        if (minputchar_c[n][j] > 0):
            rceff = calc_r_periodo_vh(Nindividuals_c[k][j] * minputchar_c[n][j],
                                      invperiod)
            p_devorados = p_devorados + np.random.binomial(Nindividuals_p[k][n],
                                                        -math.expm1(-1 * rceff))
    return p_devorados, j, rceff
Example #23
 def ageMortalityRate(self): 
     a = 0.005
     b = 7.0
     s = 0.7
 
     self.MRdaily = (a*math.exp(self.age/b))/(1.0+a*s*b*(math.expm1(self.age/b)-1.0))
     self.SRdaily = 1.0 - self.MRdaily
     self.SRhourly = self.SRdaily**(1.0/24.0)
     self.MRhourly = 1.0 - self.SRhourly
     return self.MRdaily
Example #24
def _log1mexp(x):
  """Numerically stable computation of log(1-exp(x))."""
  if x < -1:
    return math.log1p(-math.exp(x))
  elif x < 0:
    return math.log(-math.expm1(x))
  elif x == 0:
    return -np.inf
  else:
    raise ValueError("Argument must be non-positive.")
Example #25
def normalize_exp(x):
  """
  Returns
     -(exp(-x) - 1)
   = 1 - exp(-x)

  This gives us a nicely normalised distance measure that penalises large
  values subproportionally.
  """
  return -expm1(-x)
Example #26
def _log_sub(logx, logy):
  """Subtract two numbers in the log space. Answer must be positive."""
  if logy == -np.inf:  # subtracting 0
    return logx
  assert logx > logy

  try:
    # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
    return math.log(math.expm1(logx - logy)) + logy  # expm1(x) = exp(x) - 1
  except OverflowError:
    return logx
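For instance, log(5) minus log(3) in log space should give log(2); a small check, assuming the function above is in scope:

import math
print(_log_sub(math.log(5.0), math.log(3.0)))  # ≈ math.log(2.0) ≈ 0.6931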
Example #27
def planck(nu):
    T=2.936E7
    h=6.626E-27
    k=1.381E-16
    norm=1E-82
    #units: erg/s/cm^2/sr/Hz
    #divide by h*nu to get # photons instead of their energy
    #final units: #/cm^2/s/Hz (sr are fixed in norm)
    power = nu**2/math.expm1(h*nu/(k*T))
    #planck peak is about 2 oom higher than sync peak
    power *= norm
    return power
Example #28
def ciclo_May(r_species, rM, period, inctermMay, Nindivs, K):
    rspneq = calc_r_periodo_vh(abs(r_species),invperiod)
    signosp = signfunc(r_species)
    termEq = Nindivs * (signosp-(Nindivs/K))
    rcal = r_species*((1-(Nindivs/K)) + rM)
    if (abs(termEq)> 1):
        #incEq = np.random.binomial(round(abs(termEq)),1-math.exp(-rspneq))
        incEq = np.random.binomial(round(abs(termEq)),-math.expm1(-rspneq))
    else:
        incEq = 0
    ret = [Nindivs + signfunc(termEq)*incEq + abs(inctermMay),signfunc(termEq)*rcal]
    return(ret)
Example #29
def val_mutMay(r_species, beta, period, N1, N2, K1):
    rspneq = calc_r_periodo_vh(abs(r_species), invperiod)
    betaMay = beta * K1 / abs(r_species)
    termEq_abs = abs(round(betaMay * N1 * N2 / K1))
    rMay = betaMay * N2 / K1
    if (termEq_abs > 1):
        #incEq = np.random.binomial(round(abs(termEq)), -math.expm1(-rspneq)) 
        incEq = np.random.binomial(termEq_abs, -math.expm1(-rspneq)) 
    else:
        incEq = 0
    ret = [incEq, rMay]
    return(ret)
Example #30
def bgf_naive_expm1(x):
    """
    The expm1 function is part of the C standard library, so Python wraps it thinly.
    Its purpose is accuracy near zero but that feature is not used here,
    because the ratio is badly behaved near zero anyway.
    """
    try:
        denominator = math.expm1(x)
    except OverflowError as e:
        return 0
    else:
        return x / denominator
Example #31
# place `import` statement at top of the program
from math import expm1

# don't modify this code; otherwise `x` may not be available
x = int(input())
# use expm1() here
print(expm1(x))
Example #32
 def expm1_type(x):
     from math import expm1
     return expm1(x)
Example #33
 def helper2(x):
     return expm1(x) / x if x != 0. else 1.
Example #34
import logging
import math
import os
import warnings

from flask import Flask

from datetime import datetime, timedelta

from gevent.pool import Pool

from opentracing_utils import trace, extract_span_from_kwargs

from app.config import MAX_QUERY_TIME_SLICE, UPDATER_CONCURRENCY
from app.extensions import db
from app.libs.zmon import query_sli

from .models import IndicatorValue, Indicator
from .models import insert_indicator_value

MIN_VAL = math.expm1(1e-10)

logger = logging.getLogger(__name__)

updater_pool = Pool(UPDATER_CONCURRENCY)


def update_all_indicators(app: Flask):
    """
    Update all indicators async!
    """
    if os.environ.get('SLR_LOCAL_ENV'):
        warnings.warn(
            'Running on local env while not setting up gevent properly!')

    for indicator in Indicator.query.all():
Example #35
 def calc_force(self):
     self.strain = 0.0
     self.force = 0.0
     self.energy = 0.0
     self.effective_spring_constant = 0.0
     if self.broken:
         return
     else:
         position_start = self.node_start.position
         position_end = self.node_end.position
         delta_position = [
             x_end - x_start
             for x_start, x_end in zip(position_start, position_end)
         ]
         length = math.sqrt(sum([dx * dx for dx in delta_position]))
         delta_length = length - self.rest_length
         self.strain = delta_length / self.rest_length
         #
         if delta_length == 0.0:
             self.effective_spring_constant = self.spring_constant_at_rest()
             return
         elif delta_length > 0.0:
             DL = delta_length
             force_length_type = self.force_length_type_tension
             force_length_parameters = self.force_length_parameters_tension
             force_sign = +1.0
         elif delta_length < 0.0:
             DL = abs(delta_length)
             force_length_type = self.force_length_type_compression
             force_length_parameters = self.force_length_parameters_compression
             force_sign = -1.0
         #
         if force_length_type == 0:  #none
             return
         elif force_length_type == 1:  #polynomial
             self.force = sum([
                 k * math.pow(DL, p)
                 for p, k in enumerate(force_length_parameters, start=1)
             ])
             self.energy = sum([
                 k * math.pow(DL, p + 1) / (p + 1)
                 for p, k in enumerate(force_length_parameters, start=1)
             ])
             self.effective_spring_constant = sum([
                 k * math.pow(DL, p - 1) / p
                 for p, k in enumerate(force_length_parameters, start=1)
             ])
         elif force_length_type == 2:  #exponential
             self.force = sum([
                 k * math.expm1(p * DL)
                 for k, p in zip(force_length_parameters[0::2],
                                 force_length_parameters[1::2])
             ])
             self.energy = sum([
                 k * ((math.expm1(p * DL) / p) - DL)
                 for k, p in zip(force_length_parameters[0::2],
                                 force_length_parameters[1::2])
             ])
             self.effective_spring_constant = sum([
                 k * p * math.exp(p * DL)
                 for k, p in zip(force_length_parameters[0::2],
                                 force_length_parameters[1::2])
             ])
         elif force_length_type == 3:  #powerlaw
             self.force = sum([
                 k * math.pow(DL, p)
                 for k, p in zip(force_length_parameters[0::2],
                                 force_length_parameters[1::2])
             ])
             self.energy = sum([
                 k * math.pow(DL, p + 1.0) / (p + 1.0)
                 for k, p in zip(force_length_parameters[0::2],
                                 force_length_parameters[1::2])
             ])
             self.effective_spring_constant = sum([
                 k * math.pow(DL, p - 1.0) / p
                 for k, p in zip(force_length_parameters[0::2],
                                 force_length_parameters[1::2])
             ])
         self.force *= force_sign
     return
Example #36
 def expm1(self):
     self.result = False
     self.current = math.expm1(float(txtDisplay.get()))
     self.display(self.current)
Example #37
math.copysign(x, y) 	# Return x with the sign of y. On a platform that supports signed zeros, copysign(1.0, -0.0) returns -1.0.
math.fabs(x) 	# Return the absolute value of x.
math.factorial(x) 	# Return x factorial. Raises ValueError if x is not integral or is negative.
math.floor(x) 	# Return the floor of x as a float, the largest integer value less than or equal to x.
math.fmod(x, y) 	# Return fmod(x, y), as defined by the platform C library. Note that the Python expression x % y may not return the same result
math.frexp(x) 	# Return the mantissa and exponent of x as the pair (m, e). m is a float and e is an integer such that x == m * 2**e exactly
math.fsum(iterable) #  Return an accurate floating point sum of values in the iterable
math.isinf(x) 	# Check if the float x is positive or negative infinity.
math.isnan(x) 	# Check if the float x is a NaN (not a number). 
math.ldexp(x, i) 	# Return x * (2**i). This is essentially the inverse of function frexp().
math.modf(x) 	# Return the fractional and integer parts of x. Both results carry the sign of x and are floats.
math.trunc(x) 	# Return the Real value x truncated to an Integral (usually a long integer). Uses the __trunc__ method.

################# Power and logarithmic functions #################
math.exp(x) 	# Return e**x.
math.expm1(x) 	# Return e**x - 1.
math.log(x[, base]) 
# With one argument, return the natural logarithm of x (to base e).
# With two arguments, return the logarithm of x to the given base, calculated as log(x)/log(base).
math.log10(x) 	# Return the base-10 logarithm of x. This is usually more accurate than log(x, 10).
math.pow(x, y) 	# Return x raised to the power y
math.sqrt(x) 	# Return the square root of x.

################# Trigonometric functions #################
math.acos(x) 	# Return the arc cosine of x, in radians.
math.asin(x) 	# Return the arc sine of x, in radians.
math.atan(x) 	# Return the arc tangent of x, in radians.
math.atan2(y, x) 	# Return atan(y / x), in radians
math.cos(x) 	# Return the cosine of x radians.
math.hypot(x, y) 	# Return the Euclidean norm, sqrt(x*x + y*y). This is the length of the vector from the origin to point (x, y).
math.sin(x) 	# Return the sine of x radians.
Example #38
import math

x = 0.0000000000000000000000001

print(x)
print(math.exp(x) - 1)
print(math.expm1(x))
Example #39
datain = []
from math import sin, cos, sqrt, exp, expm1
R = 128
for tl in range(628):
    t = tl / 100
    x, y = R * sin(t) + 256, R * cos(t) + 256
    rd = (int(x), int(y), 255)
    datain.append(rd)

data = sorted(sorted(set(datain), key=lambda x: x[0]),
              key=lambda y: y[1])  #sortxy(datain)

en = len(data)
nl = 0
cx, cy = int(bmpw / 2), int(bmph / 2)
print(cx, cy)
sgm = 1
ampltd = 16777215  #2**24-1

for r in range(bmph):
    for p in range(bmpw):
        v = ((p - cx)**2 + (r - cy)**2)**.5
        dpf = ampltd * (expm1(-(
            (v - R) / 4 / sgm)**2) + 1)  #*(1/(sgm*sqrt(6.2831853)))
        s4 = round(dpf)
        #print (p,r,v,byencdltl(s4,int(3)),dpf)
        fout.write(1 * byencdltl(s4, int(3)))
        #print (nl,s4)

fout.close()
Example #40
# Out: 1.2626272556789115
math.hypot(1, 2)  # returns the Euclidean norm, same as math.sqrt(a*a + b*b)
# Out: 2.23606797749979

print(math.hypot(5 - 1,
                 4 - 1))  #distance between two points (x1, y1) & (x2, y2)
#math.hypot(x2-x1, y2-y1)

math.exp(0)  # 1.0
math.exp(1)  # 2.718281828459045 (e)

math.log(5)  # = 1.6094379124341003
math.log(5, math.e)  # = 1.6094379124341003
cmath.log(5)  # = (1.6094379124341003+0j)  Default is math.e
math.log(1000, 10)  # 3.0 (always returns float)
cmath.log(1000, 10)  # (3+0j)

# Logarithm base 2
math.log2(8)  # = 3.0

# Logarithm base 10
math.log10(100)  # = 2.0
cmath.log10(100)  # = (2+0j)

print(math.log1p(pow(10, -16)))  # = 1e-16  log(x+1)
print(math.log(1 + pow(10, -16)))  # = 0.0

print(math.expm1(pow(10, -16)))  # = 1e-16  exp(x) - 1
print(math.exp(pow(10, -16)) - 1)  # = 0.0
print(math.exp(pow(10, -16)) - 1.0)  # = 0.0
Example #41
# coding: utf-8

import math

print(math.expm1(0))
Example #42
     'tolerance' : 3
 },
 'exp2' : {
     'arg_types' : [F, F],
     'function_type': 'ttt',
     'values' : [
         [1.0,  2 ** 0.95, 2 ** pi, 2 ** -pi, float("inf"), float.fromhex('0x1.146b7fd8431e3p+3')], # Result
         [0.0, 0.95, pi, -pi, float("inf"), float.fromhex('0x1.8e2cp+1')]  # Arg0
     ],
     'tolerance' : 3
 },
 'expm1' : {
     'arg_types' : [F, F],
     'function_type': 'ttt',
     'values' : [
         [0.0, expm1(0.95), expm1(pi), expm1(-pi), float("inf"), float.fromhex('0x1.56fe8a160893ep+4')], # Result
         [0.0, 0.95, pi, -pi, float("inf"), float.fromhex('0x1.8e2cp+1')]  # Arg0
     ],
     'tolerance' : 3
 },
 'fabs' : {
     'arg_types' : [F, F],
     'function_type': 'ttt',
     'values' : [
         [0.0, pi/2,  pi,  0.0, float("inf"),  float("inf"), 1.12345 ], # Result
         [0.0, -pi/2, pi, -0.0, float("-inf"), float("inf"), -1.12345] # Arg0
     ],
     'tolerance' : 0
 },
 'fdim' : {
     'arg_types' : [F, F, F],
Example #43
def main():
    global c
    splitEmail()
    prob = 1
    ds = dh = 0
    """ Opens the test file"""
    linesTest = open(testFileName).read().splitlines()
    for z in linesTest:
        probListSpam = []
        probListHam = []
        featuresTest = z.split(" ")
        localDict = {}
        for y in xrange(len(featuresTest)):
            if y == 0 or y == 1:
                continue
            if featuresTest[y].isalpha():
                if featuresTest[y] not in localDict:
                    localDict.update({featuresTest[y]: featuresTest[y + 1]})
        """Every word in the data is taken into consideration """
        for x in repeatWords.keys():
            if x in localDict.keys():
                if x in globalDictSpam.keys():
                    if x not in globalDictHam:
                        th = 0
                        ts = globalDictSpam[x]
                        probWordInSpam = float(ts) / float(repeatWords[x])
                        probWordInHam = 0
                    else:
                        th = globalDictHam[x]
                        ts = globalDictSpam[x]
                        probWordInSpam = float(ts) / float(repeatWords[x])
                        probWordInHam = float(th) / float(repeatWords[x])
                    finalProbSpam = probWordInSpam / (probWordInSpam +
                                                      probWordInHam)
                else:
                    finalProbSpam = 0
                if x in globalDictHam.keys():
                    if x not in globalDictSpam:
                        ts = 0
                        th = globalDictHam[x]
                        probWordInSpam = 0
                        probWordInHam = float(th) / float(repeatWords[x])
                    else:
                        th = globalDictHam[x]
                        ts = globalDictSpam[x]
                        probWordInSpam = float(ts) / float(repeatWords[x])
                        probWordInHam = float(th) / float(repeatWords[x])
                    finalProbHam = probWordInHam / (probWordInSpam +
                                                    probWordInHam)
                else:
                    finalProbHam = 0
            else:
                finalProbSpam = 0
                finalProbHam = 0
            """Smoothing is also taken into consideration"""
            smoothFinalProbSpam = ((smoothingValue * 0.5) +
                                   (finalProbSpam * float(globalDict[x]))) / (
                                       smoothingValue + float(globalDict[x]))
            smoothFinalProbHam = ((smoothingValue * 0.5) +
                                  (finalProbHam * float(globalDict[x]))) / (
                                      smoothingValue + float(globalDict[x]))

            probListSpam.append(smoothFinalProbSpam)
            # print "smoothFinalProbSpam",smoothFinalProbSpam
            probListHam.append(smoothFinalProbHam)
            # print "smoothFinalProbHam",smoothFinalProbHam
        """Naive Bayes theorm is applied to calculate the spam. Log is applied to avoid the huge numbers."""
        sumSpam = 0
        for l in xrange(len(probListSpam)):
            sumSpam = sumSpam + math.log1p(1 - probListSpam[l]) - math.log1p(
                probListSpam[l])

        exponentialSpam = expm1(sumSpam)

        decisionSpam = 1.0 / (1.0 + float(exponentialSpam))

        # print decisionSpam

        sumHam = 0
        for l in xrange(len(probListHam)):
            sumHam = sumHam + math.log1p(1 - probListHam[l]) - math.log1p(
                probListHam[l])

        exponentialHam = expm1(sumHam)

        decisionHam = 1.0 / (1.0 + float(exponentialHam))

        # print decisionHam

        if decisionSpam >= decisionHam:
            # print "spam"
            ds = ds + 1
            if featuresTest[1].strip() == "spam":
                c = c + 1
        else:
            dh = dh + 1
            # print "ham",featuresTest[1]
            if featuresTest[1].strip() == "ham":
                c = c + 1

    ch = cs = 0
    for test in linesTest:
        testLine = test.split(" ")
        if testLine[1] == "ham":
            ch = ch + 1
            # print testLine[0]
        else:
            cs = cs + 1

    print "success Rate", math.ceil(float(c) * 100 / float((ch + cs)))
Example #44
def index():
    data = request.json
    df = pd.DataFrame(data, index=[0])
    prediction = model.predict(transformer.transform(df))
    predicted_price = expm1(prediction.flatten()[0])
    return jsonify({"price": str(predicted_price)})
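The expm1 here presumably undoes a log1p transform applied to the target at training time (an assumption about the model); the round trip is exact to floating-point accuracy:

from math import expm1, log1p

price = 123456.0
assert abs(expm1(log1p(price)) - price) < 1e-6 * price  # relative tolerance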
Example #45
def main():
    a = 5.873
    b = 4
    c = -2.7

    # number-theoretic and representation functions
    # https://docs.python.org/2/library/math.html#number-theoretic-and-representation-functions
    print("\nNumber-theoretic and representation functions")

    y = math.ceil(a)
    print("math.ceil({}) = {}".format(a, y))

    y = math.floor(a)
    print("math.floor({}) = {}".format(a, y))

    y = math.copysign(a, c)
    print("math.copysign({}, {}) = {}".format(a, c, y))

    y = math.fabs(c)
    print("math.fabs({}) = {}".format(c, y))

    y = math.factorial(b)
    print("math.factorial({}) = {}".format(b, y))

    y = math.fmod(a, b)
    print("math.fmod({}, {}) = {}".format(a, b, y))

    (y, z) = math.frexp(c)
    print("math.frexp({}) = ({}, {})".format(c, y, z))

    y = math.fsum([.1,.2,.3,.4,.5,.6,.7,.8,.9])
    print("math.fsum([.1,.2,.3,.4,.5,.6,.7,.8,.9]) = {}".format(y))

    y = math.isfinite(a)
    print("math.isfinite({}) = {}".format(a, y))

    y = math.isinf(a)
    print("math.isinf({}) = {}".format(a, y))

    y = math.isnan(c)
    print("math.isnan({}) = {}".format(c, y))

    y = math.ldexp(c, b)
    print("math.ldexp({}, {}) = {}".format(c, b, y))

    y = math.modf(a)
    print("math.modf({}) = {}".format(a, y))

    y = math.trunc(a)
    print("math.trunc({}) = {}".format(a, y))

    # Power and logarithmic functions
    print("\nPower and logarithmic functions")

    y = math.exp(b)
    print("math.exp({}) = {}".format(b, y))

    y = math.expm1(b)
    print("math.expm1({}) = {}".format(b, y))

    y = math.log(a)
    print("math.log({}) = {}".format(a, y))

    y = math.log1p(a)
    print("math.log1p({}) = {}".format(a, y))

    y = math.log2(a)
    print("math.log2({}) = {}".format(a, y))

    y = math.log10(a)
    print("math.log10({}) = {}".format(a, y))

    y = math.pow(a, b)
    print("math.pow({}, {}) = {}".format(a, b, y))

    y = math.sqrt(b)
    print("math.sqrt({}) = {}".format(b, y))

    # Trigonometric functions
    print("\nTriginometric functions")

    a = 0.24235
    b = 0.5953

    y = math.acos(a)
    print("math.acos({}) = {}".format(a, y))

    y = math.asin(a)
    print("math.asin({}) = {}".format(a, y))

    y = math.atan(a)
    print("math.atan({}) = {}".format(a, y))

    y = math.atan2(a,b)
    print("math.atan2({},{}) = {}".format(a, b, y))
    
    a = 90
    b = 15

    y = math.sin(a)
    print("math.sin({}) = {}".format(a, y))

    y = math.cos(a)
    print("math.cos({}) = {}".format(a, y))

    y = math.tan(a)
    print("math.tan({}) = {}".format(a, y))

    y = math.hypot(a, b)
    print("math.hypot({}, {}) = {}".format(a, b, y))

    # Angular conversion
    print("\nAngular conversion")

    a = 0.83

    y = math.degrees(a)
    print("math.degrees({}) = {}".format(a, y))

    y = math.radians(b)
    print("math.radians({}) = {}".format(b, y))

    # Hyperbolic functions
    print("\nHyperbolic functions")

    a = 90

    y = math.acosh(b)
    print("math.acosh({}) = {}".format(b, y))

    y = math.asinh(a)
    print("math.asinh({}) = {}".format(a, y))

    y = math.atanh(0.53)
    print("math.atanh({}) = {}".format(0.53, y))

    y = math.cosh(b)
    print("math.cosh({}) = {}".format(b, y))

    y = math.sinh(a)
    print("math.sinh({}) = {}".format(a, y))

    y = math.tanh(b)
    print("math.tanh({}) = {}".format(b, y))

    # Special functions
    print("\nSpecial functions")

    a = 34

    y = math.erf(a)
    print("math.erf({}) = {}".format(a, y))

    y = math.erfc(a)
    print("math.erfc({}) = {}".format(a, y))

    y = math.gamma(a)
    print("math.gamma({}) = {}".format(a, y))

    y = math.lgamma(a)
    print("math.lgamma({}) = {}".format(a, y))
Example #46
    def refine_probs(self):
        """ refine_probs()

            Improve the estimated probabilities used by working with
            the full set of data allocated to each node, rather than 
            just the initial sub-set used to create/split nodes.
        """
        # travel up from leaves improving log_rk etc.

        for level_it in range(len(self.assignments) - 1, -1, -1):

            for node_it in self.nodes[level_it]:
                node = self.nodes[level_it][node_it]

                if node.tree_terminated:
                    if node.nk > 1:
                        # log_rk, etc are accurate
                        node.log_dk = node.true_bhc.root_node.log_dk
                        node.log_pi = node.true_bhc.root_node.log_pi
                        node.logp = node.true_bhc.root_node.logp
                        node.log_ml = node.true_bhc.root_node.log_ml
                        node.log_rk = node.true_bhc.root_node.log_rk
                    else:
                        node.log_dk = self.crp_alpha
                        node.log_pi = 0.
                        node.logp = self.data_model.\
                                    log_marginal_likelihood(node.data,
                                                    node.data_uncerts)
                        node.log_ml = node.logp
                        node.log_rk = 0.

                elif node.truncation_terminated:
                    node.log_dk = (math.log(self.crp_alpha) +
                                   math.lgamma(node.nk))
                    node.log_pi = 0.
                    node.logp = self.data_model.\
                                    log_marginal_likelihood(node.data,
                                                    node.data_uncerts)
                    node.log_ml = node.logp
                    node.log_rk = 0.

                else:
                    left_child = self.nodes[level_it + 1][node_it * 2]
                    right_child = self.nodes[level_it + 1][node_it * 2 + 1]

                    node.log_dk = np.logaddexp(
                        math.log(self.crp_alpha) + math.lgamma(node.nk),
                        left_child.log_dk + right_child.log_dk)

                    node.log_pi = -math.log1p(
                        math.exp(left_child.log_dk + right_child.log_dk -
                                 math.log(self.crp_alpha) -
                                 math.lgamma(node.nk)))

                    if node.log_pi == 0:
                        q = (left_child.log_dk + right_child.log_dk -
                             math.log(self.crp_alpha) - math.lgamma(node.nk))
                        neg_pi = q
                    else:
                        neg_pi = math.log(-math.expm1(node.log_pi))

                    node.logp = self.data_model.\
                                    log_marginal_likelihood(node.data,
                                                    node.data_uncerts)

                    node.log_ml = np.logaddexp(
                        node.log_pi + node.logp,
                        neg_pi + left_child.log_ml + right_child.log_ml)
                    node.log_rk = (node.log_pi + node.logp - node.log_ml)

        # travel down from top improving

        for level_it in range(1, len(self.assignments)):
            for node_it in self.nodes[level_it]:
                node = self.nodes[level_it][node_it]
                parent_node = self.nodes[level_it - 1][int(node_it / 2)]

                node.prev_wk = (parent_node.prev_wk *
                                (1 - math.exp(parent_node.log_rk)))
Example #47
if not round(3.4) == 3:
    fail("round(3.4) == 3")
if not round(3.6) == 4:
    fail("round(3.6) == 4")
if not round(-3.4) == -3:
    fail("round(-3.4) == -3")
if not round(-3.6) == -4:
    fail("round(-3.6) == -4")
#
# Power and logarithmic functions
#
if not math.exp(0) == 1:
    fail("math.exp(0) == 1")
if not math.isclose(math.exp(1), math.e):
    fail("isclose(math.exp(1), math.e)")
if not math.isclose(math.exp(1) - 1, math.expm1(1)):
    fail("isclose(math.exp(1) - 1, math.expm1(1))")
if not math.isclose(math.log(1), 0):
    fail("math.isclose(math.log(1), 0)")
if not math.isclose(math.log(math.e), 1):
    fail("math.isclose(math.log(math.e), 1)")
if not math.isclose(math.log1p(1), math.log(2)):
    fail("math.isclose(math.log1p(1),math.log(2))")
if not math.isclose(math.log2(5.5), math.log(5.5) / math.log(2)):
    fail("math.isclose(math.log2(5.5),math.log(5.5)/math.log(2))")
if not math.isclose(math.log10(5.5), math.log(5.5) / math.log(10)):
    fail("math.isclose(math.log10(5.5),math.log(5.5)/math.log(10))")
if not math.isclose(math.pow(2.2, 3.3), math.exp(math.log(2.2) * 3.3)):
    fail("math.isclose(pow(2.2,3.3),math.exp(math.log(2.2) * 3.3))")
if not math.sqrt(4) == 2:
    fail("math.sqrt(4) == 2")
Example #48
 def expm1(x):
     if isinstance(x, vector):
         return vector([math.expm1(element) for element in x])
     else:
         return math.expm1(float(x))
Example #49
assert my_isclose(10**1j, (-0.6682015101903132 + 0.7439803369574931j))
assert my_isclose(10.5**(3 + 1j), (-814.610144261598 + 822.4998197514079j))

assert my_isclose(math.e**1j, (0.5403023058681398 + 0.8414709848078965j))

assert my_isclose((1 + 2j)**1j, (0.2291401859804338 + 0.23817011512167555j))

# issue 924
assert math.gcd(234, 78) == 78

# issue 1108
assert math.copysign(1.0, -0.0) == -1.0

# issue 1109
assert my_isclose(math.expm1(1e-5), 0.000010000050000166668), math.expm1(1e-5)

# issue 1110
assert math.log10(1000) == 3.0

# issue 1111
log1p = math.log1p(1e-5)
assert (log1p == 0.00000999995000033333 or  # CPython, Edge
        log1p == 0.000009999950000333332)  # Firefox, Chrome

# issue 1112
assert math.gamma(2) == 1.0

# issue 1113
assert math.lgamma(2) == 0.0
Example #50
def expm1 ( x ) :
    """ 'expm1' function taking into account the uncertainties
    """
    fun = getattr ( x , '__expm1__' , None )
    if fun : return fun()
    return math.expm1 ( x )
Example #51
print 'log1p(x):', math.log1p(x)
print

# exp(x) computes e**x
x = 2
fmt = '%.20f'
print fmt % (math.e**2)
print fmt % math.pow(math.e, 2)
print fmt % math.exp(2)
print

# expm1() computes e**x - 1
x = 0.00000000000000000000000000000001
print 'x       :', x
print 'exp(x)-1:', math.exp(x) - 1
print 'expm1(x):', math.expm1(x)
print

# 5.4.8 Angles
print '5.4.8 Angles'
# To convert degrees to radians, use radians()
print ' '.join(['{:^7}'] * 3).format('Degrees', 'Radians', 'Expected')
print ' '.join(['{:-^7}'] * 3).format('', '', '')
for deg, expected in [
    (0, 0),
    (30, math.pi / 6),
    (45, math.pi / 4),
    (60, math.pi / 3),
    (90, math.pi / 2),
    (180, math.pi),
    (270, 3 / 2.0 * math.pi),
Example #52
 def H_inv(x):  # x >= 0
     dx = d * x  # >= 0
     t = dx * r
     if (t <= -1):
         return float('inf')
     return a * expm1(helper1(t) * dx)  # >= 0
Example #53
    def __abs__(self):
        return self if self >= 0.0 else -self


##  Convenience Functions  #################################################
Var = lambda x: Num(x, 1.0)
d = lambda x: getattr(x, 'dx', 0.0)

##  Math Module Functions and Constants  ###################################
sqrt = lambda u: Num(math.sqrt(u), d(u) / (2.0 * math.sqrt(u)))
log = lambda u: Num(math.log(u), d(u) / float(u))
log2 = lambda u: Num(math.log2(u), d(u) / (float(u) * math.log(2.0)))
log10 = lambda u: Num(math.log10(u), d(u) / (float(u) * math.log(10.0)))
log1p = lambda u: Num(math.log1p(u), d(u) / (float(u) + 1.0))
exp = lambda u: Num(math.exp(u), math.exp(u) * d(u))
expm1 = lambda u: Num(math.expm1(u), math.exp(u) * d(u))
sin = lambda u: Num(math.sin(u), math.cos(u) * d(u))
cos = lambda u: Num(math.cos(u), -math.sin(u) * d(u))
tan = lambda u: Num(math.tan(u), d(u) / math.cos(u)**2.0)
sinh = lambda u: Num(math.sinh(u), math.cosh(u) * d(u))
cosh = lambda u: Num(math.cosh(u), math.sinh(u) * d(u))
tanh = lambda u: Num(math.tanh(u), d(u) / math.cosh(u)**2.0)
asin = lambda u: Num(math.asin(u), d(u) / math.sqrt(1.0 - float(u)**2.0))
acos = lambda u: Num(math.acos(u), -d(u) / math.sqrt(1.0 - float(u)**2.0))
atan = lambda u: Num(math.atan(u), d(u) / (1.0 + float(u)**2.0))
asinh = lambda u: Num(math.asinh(u), d(u) / math.hypot(u, 1.0))
acosh = lambda u: Num(math.acosh(u), d(u) / math.sqrt(float(u)**2.0 - 1.0))
atanh = lambda u: Num(math.atanh(u), d(u) / (1.0 - float(u)**2.0))
radians = lambda u: Num(math.radians(u), math.radians(d(u)))
degrees = lambda u: Num(math.degrees(u), math.degrees(d(u)))
hypot = lambda u, v: Num(math.hypot(u, v),
Example #54
def infer_ml_graph_nodes(log_p01, sample_names, mut_keys, gene_names=None, max_no_mps=None):
    """
    Infer maximum likelihood using bayesian inference for each possible mutation pattern
    :param log_p01: posterior: log probability that VAF = 0, log probability that VAF > 0
    :param sample_names:
    :param mut_keys: list with information about the variant
    :param gene_names: list with the names of the genes in which the variant occurred
    :param max_no_mps: maximal number of MPs per variant that are considered in the MILP; by default the full solution
                       space is considered and hence 2^(#samples) of MPs are generated
    :return dictionary of nodes and corresponding variants, pattern reliability scores, weights of patterns of variants
    """

    assert max_no_mps is None or max_no_mps > 0, 'At least one mutation pattern per variant has to be considered'

    n = len(sample_names)  # number of samples
    m = len(log_p01)       # number of variants

    # presence probability of a variant for calculating reliability score
    # is upper bounded because the same variant could have been independently acquired twice
    max_pre_llh = math.log(def_sets.MAX_PRE_PROB)
    not_max_pre_llh = 1.0 - max_pre_llh

    # absence probability of a variant for calculating reliability score
    # should be upper bounded because the variant could have been lost by LOH
    # for most sequencing depth this lower bound is irrelevant
    max_abs_llh = math.log(def_sets.MAX_ABS_PROB)
    not_max_abs_llh = 1.0 - max_abs_llh

    node_scores = dict()    # mutation patterns score summed over all variants

    # weight per inferred mutation pattern per variant given the p0's and p1's in each sample for a variant
    mp_weights = list()
    # mutation pattern to the corresponding column id in the weight matrix
    mp_col_ids = dict()
    # column id to corresponding mutation pattern
    idx_to_mp = list()

    trunk_mp = frozenset([sa_idx for sa_idx in range(n)])

    mp_idx = 0
    if max_no_mps is None:
        # generate all possible mutation patterns for <n> given samples and index them
        for no_pres_vars in range(0, n+1):     # number of present variants in the generated MPs (mutation patterns)
            # generate all mps with length no_pres_vars
            for mp in combinations(range(n), no_pres_vars):
                node = frozenset(mp)        # create mutation pattern
                idx_to_mp.append(node)
                mp_col_ids[node] = mp_idx

                mp_idx += 1
    else:
        # generate the <max_no_mps> most likely mutation patterns of each variant
        for mut_idx in range(m):
            mlog_pre_probs = list()
            mlog_abs_probs = list()
            for sa_idx in range(n):
                mlog_pre_probs.append(-min(log_p01[mut_idx][sa_idx][1], max_pre_llh))
                mlog_abs_probs.append(-min(log_p01[mut_idx][sa_idx][0], max_abs_llh))

            for i, (mlog_prob, ml_mp, not_flipped_sas) in enumerate(
                    _get_ml_mps(n, mlog_pre_probs, mlog_abs_probs, max_no_mps)):

                # check if this pattern already belongs to one of the most likely one for another variant
                if ml_mp not in mp_col_ids:
                    idx_to_mp.append(ml_mp)
                    mp_col_ids[ml_mp] = mp_idx
                    mp_idx += 1

                # logger.debug('{} {}th: {} {:.3f}'.format(gene_names[mut_idx], i + 1, ml_mp, math.exp(-mlog_prob)))

        logger.info('Only {} mutation patterns will be explored in total.'.format(mp_idx))

    # calculate the reliability scores for all as above as relevant determined mutation patterns
    for mut_idx in range(m):

        mp_weights.append(dict())

        for mp_idx, node in enumerate(idx_to_mp):

            log_ml = 0.0                # log maximum likelihood of the inferred pattern
            for sa_idx in node:                         # variant is present
                log_ml += min(log_p01[mut_idx][sa_idx][1], max_pre_llh)
            for sa_idx in trunk_mp.difference(node):    # variant is absent
                log_ml += min(log_p01[mut_idx][sa_idx][0], max_abs_llh)

            if log_ml == 0.0:   # numerical artifact, use approximation: ignore second order term
                for sa_idx in node:                         # variant is present
                    log_ml += math.exp(max(log_p01[mut_idx][sa_idx][0], not_max_pre_llh))     # sum probability
                for sa_idx in trunk_mp.difference(node):    # variant is absent
                    log_ml += math.exp(max(log_p01[mut_idx][sa_idx][1], not_max_abs_llh))

                log_ml = np.log1p(-log_ml)      # calculates log(1+argument)
                if gene_names is not None:
                    logger.debug('Approximated log probability of variant {} having pattern {} by {:.2e}.'.format(
                        gene_names[mut_idx], node, log_ml))
                else:
                    logger.debug('Approximated log probability of variant {} having pattern {} by {:.2e}.'.format(
                        mut_keys[mut_idx], node, log_ml))
                if log_ml == 0.0:
                    if len(node) == 0 or len(node) == 1 or len(node) == n:
                        logger.debug('Underflow warning. Set probability to minimal float value!')
                    else:
                        logger.warning('Underflow error. Set probability to minimal float value!')
                    log_ml = -200

                assert log_ml < 0.0, ('Underflow error while calculating the probability that the ' +
                                      'variant {} does not have pattern {}.'.format(
                                       mut_keys[mut_idx], ', '.join(sample_names[sa_idx] for sa_idx in node)))

            # if max_no_mps is not None:  # not the full solution space is explored
            #     # use heapq to keep track of the most likely <max_no_mps> of mutation patterns
            #     if len(heap) < max_no_mps:
            #         heapq.heappush(heap, (log_ml, mp_idx))
            #     elif log_ml > heap[0][0]:      # likelihood of currently considered MP is higher than smallest in heap
            #         # log likelihood of mp that is more likely for the currently considered variant
            #         heapq.heapreplace(heap, (log_ml, mp_idx))
            # else:                       # full solution space is explored, weight of every pattern is relevant
            # assign calculated log probability that this variant has this mutation pattern
            mp_weights[mut_idx][mp_idx] = log_ml

        # if max_no_mps is not None:          # most likely MPs for this variant if the solution space is limited
        #     # assign calculated log probability that this variant has this mutation pattern
        #     for log_ml, mp_idx in heap:
        #         mp_weights[mut_idx][mp_idx] = log_ml

        # run through all relevant MPs for this variant and sum their log likelihoods
        # to calculate the reliability scores
        for mp_idx, log_ml in mp_weights[mut_idx].items():
            node = idx_to_mp[mp_idx]
            # calculate the probability of a mp that no variant has this mutation pattern
            # product of (1 - the probability that a variant has this mp)
            if node in node_scores.keys():
                node_scores[node] -= math.log(-math.expm1(log_ml))
            else:
                node_scores[node] = -math.log(-math.expm1(log_ml))

    for mp_idx, node in enumerate(idx_to_mp):
        if node in node_scores and node_scores[node] == 0.0:
            if len(node) == 0 or len(node) == 1 or len(node) == len(sample_names):
                # logger.debug('Underflow warning for pattern {}. Set probability to minimal float value!'
                #       .format(', '.join(sample_names[sa_idx] for sa_idx in node)))
                pass
            else:
                # logger.warn('Underflow error for pattern {}. Set probability to minimal float value!'.format(
                #     ', '.join(sample_names[sa_idx] for sa_idx in node)))
                # raise RuntimeError(
                #     'Underflow error for pattern {}. Set probability to minimal float value!'.format(
                #         ', '.join(sample_names[sa_idx] for sa_idx in node)))
                pass

            node_scores[node] = sys.float_info.min

        # logger.debug('Variant {} has pattern {} with probability {:.1e}.'.format(
        #     gene_names[mut_idx], ', '.join(sample_names[sa_idx] for sa_idx in node), math.exp(log_ml)))

    # normalize reliability score by the number of processed variants (m)
    for node in node_scores.keys():
        node_scores[node] /= m
        if node_scores[node] == 0.0:
            node_scores[node] = sys.float_info.min

    # Show nodes with highest reliability score
    for node, score in itertools.islice(sorted(node_scores.items(), key=lambda k: -k[1]), 0, 30):
        logger.debug('Pattern {} has a normalized reliability score of {:.2e}.'.format(node, score))

    return node_scores, idx_to_mp, mp_col_ids, mp_weights
Example #55
def expm1(x):
    return math.expm1(x)
Example #56
def blackbody_spectrum(wl, temp):
    wlm = wl * 1e-9
    return ((3.7417715247e-16 / (wlm**5)) / math.expm1(1.438786e-2 /
                                                       (wlm * temp)))
Example #57
 def expm1_call(x):
     from math import expm1
     return expm1(x)
Example #58
 def expm1_phrase(x, y):
     from math import expm1
     a = expm1(x) + expm1(y)
     return a
Example #59
            if ((holdout != 0) and (week >= holdout)):
                # step 2-1, calculate validation loss
                #           we do not train with the validation data so our
                #           validation loss is an accurate estimation
                #
                # holdout: train instances from day 1 to day N -1
                #            validate with instances from day N and after
                #
                loss += (max(0, p) - y)**2
                count += 1
            else:
                # step 2-2, update learner with demand information
                learner.update(x, p, y)

        count = max(count, 1)
        print('Epoch %d finished, validation RMSLE: %f, elapsed time: %s' %
              (e, sqrt(loss / count), str(datetime.now() - start)))

    #########################################################################
    # start testing, and build Kaggle's submission file #####################
    #########################################################################

    with open(submission, 'w') as outfile:
        outfile.write('id,Demanda_uni_equil\n')
        for t, date, ID, x, y in data(test, D):
            p = learner.predict(x)
            outfile.write('%s,%.3f\n' % (ID, expm1(max(0, p))))
            if ((t % 100000) == 0):
                print(t)
    print('Finished')
Example #60
def main(notify=False):
    # constants
    decay_rate = -10
    neighbors = 5
    max_clones = 40
    ab_count = 20
    clones_count = 4
    affinity_threshold = 0.5 # percentage that gets OKed
    
    antigens = load_data('wdbc.data')
    init_antibodies = generate_antibodies(ab_count, antigens, neighbors, False)
    antibodies = []

    START_TIME = time.time()

    # initiate antibodies as random cases of ags
    if len(init_antibodies) > ab_count:
        for i in range(ab_count):
            ab = init_antibodies[randint(0, len(init_antibodies) - 1)]
            while ab in antibodies:
                ab = init_antibodies[randint(0, len(init_antibodies) - 1)]
            antibodies.append(ab)
        
    # gets initial affinities
    for antibody in antibodies:
        antibody.k_neighbors_affinity(neighbors, antigens)

    if notify:
        print '#'*20
        print 'INITIAL ANTIBODIES'
        for antibody in antibodies:
            print antibody
        print '#'*20

    for iteration in range(30): 
        print '\nITERATION: ', iteration
        antibodies = sorted(antibodies, key=sort_key, reverse=True)
        highest_aff_abs = []
        # Select the highest affinity
        max_ab_affinity = antibodies[0].affinity
        for antibody in antibodies:
            highest_aff_abs.append(antibody)
        
        # Getting sum; will be used for weighs
        affs_sum = 0
        for antibody in highest_aff_abs:
            affs_sum += antibody.affinity

        # Clones the best
        max_ab_affinity = highest_aff_abs[0].affinity
        min_ab_affinity = highest_aff_abs[-1].affinity
        for antibody in highest_aff_abs:
            normalized_affinity = 1
            if (max_ab_affinity-min_ab_affinity) > 0:
                normalized_affinity = (antibody.affinity-min_ab_affinity)*1.0/(max_ab_affinity-min_ab_affinity)
            n_clones = normalized_affinity*clones_count + 1
            for n in range(int(n_clones)):      
                # Creates child and mutates it.
                # If its better than the parent, go-ahead. Else, dump it
                cloned_ab = Antibody(id=antibody.id, paratope=antibody.paratope, affinity=antibody.affinity)
                value = (math.expm1(normalized_affinity*decay_rate) + 1)
                cloned_ab.mutate(value)
                cloned_ab.k_neighbors_affinity(neighbors, antigens)
                if cloned_ab.affinity >= antibody.affinity:
                    print "ITERATION", iteration, "THERE'S A BETTER CLONE: {} VERSUS {} MUTATION VALUE {}".format(antibody.affinity, cloned_ab.affinity, value)
                    antibodies.append(cloned_ab)
                else:
                    print "ITERATION", iteration, "WORSE CLONE: {} VERSUS {} MUTATION VALUE {}".format(antibody.affinity, cloned_ab.affinity, value)


        antibodies = sorted(antibodies, key=sort_key, reverse=True)
        antibodies = antibodies[:max_clones]

        for antibody in antibodies:
            print antibody.affinity,

        # removes antibodies that are too close
        antibodies_copy = antibodies[:]
        for antibody in antibodies_copy:
            for antibody2 in antibodies_copy:
                if euclidian_distance(antibody, antibody2) < 10:
                    if antibody.affinity < antibody2.affinity and antibody in antibodies:
                        antibodies.remove(antibody)


        if iteration < 100:
            # Adds new batch of randomized abs
            antibodies = antibodies + generate_antibodies(ab_count, antigens, neighbors)

        antibodies = sorted(antibodies, key=sort_key, reverse=True) 

    if notify:
        print '#'*20
        print 'FINAL ANTIBODIES'
        for antibody in antibodies:
            print antibody
        print '#'*20

    return antibodies[0]