Example #1
import bigfloat

def compute_boltzmann(energies, debug=False):
    # create Boltzmann distribution
    # https://en.wikipedia.org/wiki/Boltzmann_distribution
    # p_i = exp( -E_i / kT ) / ∑ exp( -E_j / kT )
    # where:
    #    p_i is the probability of state i occurring
    #    E_i is the energy of state i
    #    E_j is the energy of state j, where the ∑ in the denominator
    #    iterates over all states j
    # first we calculate exp( -E_i / kT ) for all states
    if len(energies) == 0:
        return []
    if debug:
        print("Boltzmann Distribution")
    divisor = bigfloat.BigFloat.exact(0.0)
    for energy in energies:
        ep = bigfloat.exp(-energy)
        if debug:
            print("energy, ep = %g, %g" % (energy, ep))
        # divisor = ∑ exp( -E_j / kT )
        divisor += ep
    probabilities = []
    for energy in energies:
        # p_i = exp( -E_i / kT ) / divisor
        numerator = bigfloat.exp(-energy)
        probability = numerator / divisor
        # append the probability to the results list
        probabilities.append(float(probability))
        if debug:
            print("%s / %s = %s" % (numerator, divisor, probability))

    return probabilities
Example #2
import bigfloat

def compute_boltzmann(energies, debug=False):
    # create Boltzmann distribution
    # https://en.wikipedia.org/wiki/Boltzmann_distribution
    # p_i = exp( -E_i / kT ) / ∑ exp( -E_j / kT )
    # where:
    #    p_i is the probability of state i occurring
    #    E_i is the energy of state i
    #    E_j is the energy of state j, where the ∑ in the denominator
    #    iterates over all states j
    # first we calculate exp( -E_i / kT ) for all states
    if len(energies) == 0:
        return []
    if debug:
        print("Boltzmann Distribution")
    divisor = bigfloat.BigFloat.exact(0.0)
    for energy in energies:
        ep = bigfloat.exp(-energy)
        if debug:
            print("energy, ep = %g, %g" % (energy, ep))
        # divisor = ∑ exp( -E_j / kT )
        divisor += ep
    probabilities = []
    for energy in energies:
        # p_i = exp( -E_i / kT ) / divisor
        numerator = bigfloat.exp(-energy)
        probability = numerator / divisor
        # append the probability to the results list
        probabilities.append(float(probability))
        if debug:
            print("%s / %s = %s" % (numerator, divisor, probability))

    return probabilities
Example #3
def GauGo_eq(temp, E, E_D, T_pk, theta):
    """Gaussian - Gompertz model, used for estimating trait values at 
    a given temperature."""
    global max_trait

    function = max_trait * exp(1) ** (-E * (temp - T_pk) * (temp \
                         - T_pk) - exp(1) ** (E_D * (temp - T_pk) - theta))
    return numpy.array(map(log, function), dtype=numpy.float64)
Example #4
def JoLe_eq(temp, B0, E, E_D, T_pk):
    """Johnson - Lewin model, used for estimating trait values at 
    a given temperature."""
    global K

    function = B0 * exp(1) ** (-E / (K * temp)) / (1 + exp(1) ** (1 / K
                  * (E_D / T_pk + K * log(E / (E_D - E)) - (E_D / temp))))
    return numpy.array(map(log, function), dtype=numpy.float64)
Example #5
import bigfloat as bf

def interval_exp(interval, context_down, context_up):
    lower, upper = interval
    out_lower, out_upper = [
        bf.exp(lower, context_down),
        bf.exp(upper, context_up)
    ]
    out_interval = [out_lower, out_upper]
    derivs = [out_lower, 0], [0, out_upper]
    return out_interval, derivs
Example #6
def sharpe_schoolf_eq(temp, B0, E, E_D, T_pk):
    """A function to estimate the trait value at a given temperature according 
    to the Sharpe-Schoolfield model."""
    global K

    function = (B0 * exp(1) ** (-E * ((1 / (K * temp)) - (1 / (K * 273.15))))
                / (1 + (E / (E_D - E))
                   * exp(1) ** (E_D / K * (1 / T_pk - 1 / temp))))
    return numpy.array(map(log, function), dtype=numpy.float64)
Example #7
    def test_exp(self):
        self.assertEqual(bigfloat.Number("1"),
                         bigfloat.exp(bigfloat.Number("0")))
        self.assertEqual(
            bigfloat.Number("1") / bigfloat.exp(bigfloat.Number("1")),
            bigfloat.exp(bigfloat.Number("-1")))
        self.assertEqual(
            bigfloat.Number("1") / bigfloat.exp(bigfloat.Number("123")),
            bigfloat.exp(bigfloat.Number("-123")))
        self.assertEqual(
            bigfloat.Number("1") / bigfloat.exp(bigfloat.Number("0.3")),
            bigfloat.exp(bigfloat.Number("-0.3")))

        def test_almost_equal(a, b, bound):
            err = a - b
            self.assertGreaterEqual(bigfloat.Number(bound), err)
            self.assertLessEqual(bigfloat.Number("-" + bound), err)

        test_almost_equal(bigfloat.exp(bigfloat.Number("1")),
                          bigfloat.Number("2.71828 18284"), "0.00001")

        def test_exp_helper(a1, a2, a3, b, bound):
            a = bigfloat.exp(bigfloat.Number(a1)) * bigfloat.exp(
                bigfloat.Number(a2)) * bigfloat.exp(bigfloat.Number(a3))
            b = bigfloat.exp(bigfloat.Number(b))
            test_almost_equal(a, b, bound)

        test_exp_helper("1", "1", "1", "3", "0.0000001")
        test_exp_helper("2", "3", "7", "12", "0.0000001")
        test_exp_helper("0.2", "0.3", "0.9", "1.4", "0.0000001")
Example #8
def fn_logprior(d_t,R_t,logalpha_pmf,logbeta_pmf,maxlhs,beta_Z,nruleslen,lhs_len):
    #The prior will be _proportional_ to this -> we drop the normalization for alpha
    #beta_Z is the normalization for beta, except the terms that need to be dropped due to running out of rules.
    #log p(d_star) = log alpha(m|lambda) + sum_{i=1..m} [ log beta(l_i|eta) + log gamma(r_i|l_i) ]
    #The length of the list (m) is R_t
    #Get logalpha (length of list) (overloaded notation in this code, unrelated to the prior hyperparameter alpha)
    logprior = 0.
    logalpha = logalpha_pmf[R_t] #this is proportional to logalpha - we have dropped the normalization for truncating based on total number of rules
    logprior += logalpha
    empty_rulelens = []
    nlens = zeros(maxlhs+1)
    for i in range(R_t):
        l_i = lhs_len[d_t[i]]
        logbeta = logbeta_pmf[l_i] - log(beta_Z - sum([bigfloat.exp(logbeta_pmf[l_j]) for l_j in empty_rulelens])) #The correction for exhausted rule lengths
        #Finally loggamma
        loggamma = -log(nruleslen[l_i] - nlens[l_i])
        #And now check if we have exhausted all rules of a certain size
        nlens[l_i] += 1
        if nlens[l_i] == nruleslen[l_i]:
            empty_rulelens.append(l_i)
        elif nlens[l_i] > nruleslen[l_i]:
            raise Exception
        #Add 'em in
        logprior += logbeta
        logprior += loggamma
    #All done
    return logprior
Example #9
File: TanSig.py, Project: noahs-bark/kaboom
import numpy
import bigfloat

def TanSig(x):
    # n=len(x)
    # if(n==1):
    # print x
    y = 2 / (1 + bigfloat.exp((numpy.multiply(-2, x)))) - 1
    # print y
    return y
Example #10
    def partition_functions_for_each_configuration(self):
        """
        Inversely solve the free energy to get partition function for :math:`j` th configuration by

        .. math::

           Z_{j}(T, V) = \\exp \\bigg( -\\frac{ F_{j}(T, V) }{ k_B T } \\bigg).

        :return: A matrix, the partition function of each configuration of each volume.
        """
        try:
            import bigfloat
        except ImportError:
            raise ImportError(
                "Install ``bigfloat`` package to use {0} object!".format(
                    self.__class__.__name__))

        with bigfloat.precision(self.precision):
            return np.array([
                bigfloat.exp(d)
                for d in  # shape = (# of volumes for each configuration, 1)
                logsumexp(
                    -self.aligned_free_energies_for_each_configuration.T /
                    (K * self.temperature),
                    axis=1,
                    b=self.degeneracies)
            ])
Example #11
import bigfloat as bfloat

def sigmoidx(x, derivative):
    # logistic function of the scaled input x/20
    s = 1 / (1 + bfloat.exp(-x / 20, bfloat.precision(30)))
    # if derivative is True, return the sigmoid derivative s*(1-s)
    # (w.r.t. the scaled input; differentiating w.r.t. x adds a factor of 1/20)
    if derivative:
        return s * (1 - s)
    return s
Example #12
def sigmoid(s):
    y = []
    for k in s:
        z=[]
        for l in k:
            z.append( bigfloat.div( 1. , bigfloat.add(1,bigfloat.exp(-l,bigfloat.precision(precision)))))
        y.append(z)
    return np.array(y)
Example #13
def schoolf_eq(temp, B0, E, E_D, T_pk):
    """Schoolfield model, used for estimating trait values at 
    a given temperature."""
    global K
    
    function = B0 * exp(1) ** (-E * ((1/(K*temp)) - (1/(K*283.15)))) / (1 + (E/(E_D - E)) * exp(1) ** (E_D / K
            * (1 / T_pk - 1 / temp)))
    return numpy.array(map(log, function), dtype=numpy.float64)
Example #14
def predict_dpi(x, s):
    num = 0
    den = 0
    for i in range(len(s)):
        y_i = s[i, len(x)]
        x_i = s[i, :len(x)]
        exp = bg.exp(-0.25 * norm(x - x_i)**2)
        num += y_i * exp
        den += exp
    return num / den
Example #15
def predict_dpi(x, s):
    num = 0
    den = 0
    for i in range(len(s)):
        y_i = s[i, len(x)]
        x_i = s[i, :len(x)]
        ex = bg.exp(-0.25 * (math.pow(euclidean_distance(x, x_i), 2)))
        num = bg.add(num, bg.mul(y_i, ex))
        den = bg.add(den, ex)
    return bg.div(num, den)
Example #16
def GauGo(params, temp, data):
    """Gaussian - Gompertz model, used by the optimizer."""

    global max_trait

    # Get the parameters.
    E = params['E'].value
    E_D = params['E_D'].value
    T_pk = params['T_pk'].value
    theta = params['theta'].value

    # Prevent E from exceeding E_D by punishing the optimizer with huge
    # numbers when that is the case.
    if E >= E_D:
        return 1e10

    # Otherwise, fit the model!
    function = max_trait * exp(1) ** (-E * (temp - T_pk) * (temp \
                         - T_pk) - exp(1) ** (E_D * (temp - T_pk) - theta))

    return numpy.array(map(log, function) - data, dtype=numpy.float64)
Example #17
def JoLe(params, temp, data):
    """Johnson - Lewin model, used by the optimizer."""

    global K

    # Get the parameters.
    E = params['E'].value
    E_D = params['E_D'].value
    T_pk = params['T_pk'].value
    B0 = params['B0'].value

    # Prevent E from exceeding E_D by punishing the optimizer with huge
    # numbers when that is the case.
    if E >= E_D:
        return 1e10

    # Otherwise, fit the model!
    function = B0 * exp(1) ** (-E / (K * temp)) / (1 + exp(1) ** (1 / K
                  * (E_D / T_pk + K * log(E / (E_D - E)) - (E_D / temp))))

    return numpy.array(map(log, function) - data, dtype=numpy.float64)
Example #18
File: mfng.py, Project: horvatha/mfng
    def degdist_bigfloat(self, maxdeg, n, mindeg=0):
        """Returns the degree distribution from 0 to maxdeg degree.

        It should be used with the (iterated) link probability measure.

        Parameters:
          maxdeg: the maximal degree for which we calculate the degree distribution
          n: the size of the network (the number of vertices)

        Returns:
            rho: the degree distribution as a list of length maxdeg+1.
                The index k gives the probability of having degree k.
        """

        assert isinstance(n, int) and isinstance(maxdeg, int) and n > maxdeg
        import bigfloat
        context = bigfloat.Context(precision=10)
        divs = self.divs
        n_intervals = len(divs)
        lengths = [divs[i] - divs[i - 1] for i in xrange(1, n_intervals)]
        lengths.insert(0, divs[0])
        log_lengths = numpy.log(lengths)
        # Eq. 5, where ...
        avgdeg = [
            bigfloat.BigFloat(n * sum(
                [self.probs[i][j] * lengths[j] for j in xrange(n_intervals)]),
                              context=context) for i in xrange(n_intervals)
        ]
        #log_factorial = [ 0.5*bigfloat.log(2*math.pi, context=context) + (d+.5)*bigfloat.log(d, context=context) - d
        #                 for d in xrange(1,maxdeg+1) ]
        log_factorial = [
            bigfloat.log(bigfloat.factorial(k), context=context)
            for k in xrange(1, maxdeg + 1)
        ]
        log_factorial.insert(0, 0)

        rho = [bigfloat.BigFloat(0, context=context)] * (maxdeg + 1)
        # Eq. 4
        for i in xrange(n_intervals):
            # Eq. 5
            log_rho_i = [
                (bigfloat.mul(k, bigfloat.log(avgdeg[i]), context=context) -
                 log_factorial[k] - avgdeg[i])
                for k in xrange(mindeg, maxdeg + 1)
            ]
            log_rho_i_length = [
                log_rho_i[k - mindeg] + log_lengths[i]  # log_rho_i is indexed from degree mindeg
                for k in xrange(mindeg, maxdeg + 1)
            ]
            for k in xrange(mindeg, maxdeg + 1):
                rho[k] += bigfloat.exp(log_rho_i_length[k - mindeg], context=context)
        return rho
Example #19
def sharpe_schoolf(params, temp, data):
    """A function to be used by the optimizer to fit the
    Sharpe-Schoolfield model."""

    global K

    E = params['E'].value
    E_D = params['E_D'].value
    T_pk = params['T_pk'].value
    B0 = params['B0'].value

    # Penalize the optimizer when E reaches or exceeds E_D.
    if E >= E_D:
        return 1e10

    function = (B0 * exp(1) ** (-E * ((1 / (K * temp)) - (1 / (K * 273.15))))
                / (1 + (E / (E_D - E))
                   * exp(1) ** (E_D / K * (1 / T_pk - 1 / temp))))
    return numpy.array(map(log, function) - data, dtype=numpy.float64)
Example #20
def relative_likelihood_result_calculator(population):
    """
    Given a population, this function calculates the relative likelihood result (each
    chromosome's likelihood normalized by the population total) for every chromosome.

    Args:
        population : LIST[Chromosome(), Chromosome(), ...]
            A list filled with 'Chromosome' objects
    """
    with bf.quadruple_precision:
        total = sum_likelihood_result(population)
        for i in range(0, len(population)):
            log_likelihood_result = bf.exp(bf.BigFloat(str(population[i].get_log_likelihood_result())))
            population[i].set_relative_likelihood_result(float(bf.div(log_likelihood_result, total)))
Example #21
def relative_likelihood_result_calculator(population):
    """
    Given a population, this function calculates the relative likelihood result (each
    chromosome's likelihood normalized by the population total) for every chromosome.

    Args:
        population : LIST[Chromosome(), Chromosome(), ...]
            A list filled with 'Chromosome' objects
    """
    with bf.quadruple_precision:
        total = sum_likelihood_result(population)
        for i in range(0, len(population)):
            log_likelihood_result = bf.exp(bf.BigFloat(str(population[i].get_log_likelihood_result())))
            population[i].set_relative_likelihood_result(float(bf.div(log_likelihood_result, total)))
Example #22
def schoolf(params, temp, data):
    """Schoolfield model, used by the optimizer."""

    global K

    E = params['E'].value
    E_D = params['E_D'].value
    T_pk = params['T_pk'].value
    B0 = params['B0'].value

    if E >= E_D:
        return 1e10

    function = B0 * exp(1) ** (-E * ((1/(K*temp)) - (1/(K*283.15)))) / (1 + (E/(E_D - E)) * exp(1) ** (E_D / K
            * (1 / T_pk - 1 / temp)))
    return numpy.array(map(log, function) - data, dtype=numpy.float64)
Example #23
File: parse_data.py, Project: j3ny/Analysis
import numpy
import bigfloat

def get_boltzmann_distribution(energy_by_arm):
    R = 8.3144621  # gas constant
    T = 293.15  # room temperature
    factor = 4184.0  # joules_per_kcal
    boltzmann_distribution = []
    for dG in energy_by_arm:
        ps = []
        total = bigfloat.BigFloat(0)
        for energy in dG:
            p = bigfloat.exp((-energy*factor)/(R*T), bigfloat.precision(1000))
            ps.append(p)
            total = bigfloat.add(total, p)
        normal_ps = []
        for p in ps:
            normal_ps.append(float(bigfloat.div(p,total)))
        boltzmann_distribution.append(numpy.array(normal_ps))
    return boltzmann_distribution
Example #24
def predict_dpi(x, s):
    """Predict the average price change deltap_i, 1 <= i <= 3.
    Args:
        x: A numpy array of floats representing previous 180, 360, or 720 prices.
        s: A 2-dimensional numpy array generated by choose_effective_centers().
    Returns:
        A big float representing average price change deltap_i.
    """
    num = 0
    den = 0
    for i in range(len(s)):
        y_i = s[i, len(x)]
        x_i = s[i, :len(x)]
        exp = bg.exp(-0.25 * norm(x - x_i)**2)
        num += y_i * exp
        den += exp
    return num / den
Example #25
def sum_likelihood_result(population):
    """
    Sums all the likelihood result values in a population.

    Args:
        population : LIST[Chromosome(), Chromosome(), ...]
            A list filled with 'Chromosome' objects

    Returns:
        BigFloat
            The sum of all likelihood results in the population
    """
    with bf.quadruple_precision:
        total = bf.BigFloat("0.0")
        for i in range(0, len(population)):
            log_likelihood_result = bf.exp(bf.BigFloat(str(population[i].get_log_likelihood_result())))
            total = bf.add(total, log_likelihood_result)
    return total
Example #26
def sum_likelihood_result(population):
    """
    Sums all the likelihood result values in a population.

    Args:
        population : LIST[Chromosome(), Chromosome(), ...]
            A list filled with 'Chromosome' objects

    Returns:
        BigFloat
            The sum of all likelihood results in the population
    """
    with bf.quadruple_precision:
        total = bf.BigFloat("0.0")
        for i in range(0, len(population)):
            log_likelihood_result = bf.exp(bf.BigFloat(str(population[i].get_log_likelihood_result())))
            total = bf.add(total, log_likelihood_result)
    return total
Example #27
def bayesdl_mcmc(numiters,thinning,alpha,lbda,eta,X,Y,nruleslen,lhs_len,maxlhs,permsdic,burnin,rseed,d_init):
    #initialize
    perms = []
    if rseed:
        random.seed(rseed)
    #Do some pre-computation for the prior
    beta_Z,logalpha_pmf,logbeta_pmf = prior_calculations(lbda,len(X),eta,maxlhs)
    if d_init: #If we want to begin our chain at a specific place (e.g. to continue a chain)
        d_t = Pickle.loads(d_init)
        d_t.extend([i for i in range(len(X)) if i not in d_t])
        R_t = d_t.index(0)
        N_t = compute_rule_usage(d_t,R_t,X,Y)
    else:
        d_t,R_t,N_t = initialize_d(X,Y,lbda,eta,lhs_len,maxlhs,nruleslen) #Otherwise sample the initial value from the prior
    #Add to dictionary which will store the sampling results
    a_t = Pickle.dumps(d_t[:R_t+1]) #The antecedent list in string form
    if a_t not in permsdic:
        permsdic[a_t][0] = fn_logposterior(d_t,R_t,N_t,alpha,logalpha_pmf,logbeta_pmf,maxlhs,beta_Z,nruleslen,lhs_len) #Compute its logposterior
    if burnin == 0:
        permsdic[a_t][1] += 1 #store the initialization sample
    #iterate!
    for itr in range(numiters):
        #Sample from proposal distribution
        d_star,Jratio,R_star,step = proposal(d_t,R_t,X,Y,alpha)
        #Compute the new posterior value, if necessary
        a_star = Pickle.dumps(d_star[:R_star+1])
        if a_star not in permsdic:
            N_star = compute_rule_usage(d_star,R_star,X,Y)
            permsdic[a_star][0] = fn_logposterior(d_star,R_star,N_star,alpha,logalpha_pmf,logbeta_pmf,maxlhs,beta_Z,nruleslen,lhs_len)
        #Compute the metropolis acceptance probability
        q = bigfloat.exp(permsdic[a_star][0] - permsdic[a_t][0] + Jratio)
        u = random.random()
        if u < q:
            #then we accept the move
            d_t = list(d_star)
            R_t = int(R_star)
            a_t = str(a_star)
            #else: pass
        if itr > burnin and itr % thinning == 0:
            ##store
            permsdic[a_t][1] += 1
            perms.append(a_t)
    return permsdic,perms
Example #28
def predict_dpi(x, s):
    """Predict the average price change Δp_i, 1 <= i <= 3.

    Args:
        x: A numpy array of floats representing previous 180, 360, or 720 prices.
        s: A 2-dimensional numpy array generated by choose_effective_centers().

    Returns:
        A big float representing average price change Δp_i.
    """
    num = 0
    den = 0
    for i in range(len(s)):
        y_i = s[i, len(x)]
        x_i = s[i, :len(x)]
        exp = bg.exp(-0.25 * norm(x - x_i) ** 2)
        num += y_i * exp
        den += exp
    return num / den
Example #29
    def degdist_bigfloat(self, maxdeg, n, mindeg=0):
        """Returns the degree distribution from 0 to maxdeg degree.

        It should be used with the (iterated) link probability measure.

        Parameters:
          maxdeg: the maximal degree for which we calculate the degree distribution
          n: the size of the network (the number of vertices)

        Returns:
            rho: the degree distribution as a list of length maxdeg+1.
                The index k gives the probability of having degree k.
        """

        assert isinstance(n, int) and isinstance(maxdeg, int) and n > maxdeg
        import bigfloat
        context = bigfloat.Context(precision=10)
        divs = self.divs
        n_intervals = len(divs)
        lengths = [divs[i] - divs[i-1] for i in xrange(1, n_intervals)]
        lengths.insert(0, divs[0])
        log_lengths = numpy.log(lengths)
        # Eq. 5, where ...
        avgdeg = [bigfloat.BigFloat(n*sum([self.probs[i][j]*lengths[j]
                  for j in xrange(n_intervals)]), context=context) for i in xrange(n_intervals)]
        #log_factorial = [ 0.5*bigfloat.log(2*math.pi, context=context) + (d+.5)*bigfloat.log(d, context=context) - d
        #                 for d in xrange(1,maxdeg+1) ]
        log_factorial = [bigfloat.log(bigfloat.factorial(k), context=context)
                         for k in xrange(1, maxdeg+1)]
        log_factorial.insert(0, 0)

        rho = [bigfloat.BigFloat(0, context=context)] * (maxdeg+1)
        # Eq. 4
        for i in xrange(n_intervals):
            # Eq. 5
            log_rho_i = [(bigfloat.mul(k, bigfloat.log(avgdeg[i]), context=context) - log_factorial[k] - avgdeg[i])
                         for k in xrange(mindeg, maxdeg+1)]
            log_rho_i_length = [log_rho_i[k - mindeg] + log_lengths[i]  # log_rho_i is indexed from degree mindeg
                                for k in xrange(mindeg, maxdeg+1)]
            for k in xrange(mindeg, maxdeg+1):
                rho[k] += bigfloat.exp(log_rho_i_length[k - mindeg], context=context)
        return rho
Example #30
def calculate_operational_niche_width(fitted_params, B_pk, T_ref):
    """Calculate the operational niche width for this fit, i.e. 
    the difference in temperatures between the thermal optimum (T_pk) 
    and the temperature at which performance is half of the 
    maximum (B_pk/2)."""

    dist_to_half_B_pk = 999999
    current_temperature = -9999

    for temp in numpy.arange(263.15, fitted_params['T_pk'].value + 0.01, 0.01):
        trait_val_at_temp = exp(1)**sharpe_schoolf_eq(
            numpy.array([temp]), fitted_params['B0'].value,
            fitted_params['E'].value, fitted_params['E_D'].value,
            fitted_params['T_pk'].value)

        current_dist = abs((B_pk / 2.0) - trait_val_at_temp)
        if current_dist < dist_to_half_B_pk:
            dist_to_half_B_pk = current_dist
            current_temperature = temp

    return fitted_params['T_pk'].value - current_temperature
Example #31
    def _static_part(self) -> Vector:
        """
        Calculate the static contribution to the partition function.

        :return: The static contribution on the temperature-volume grid.
        """
        try:
            import bigfloat
        except ImportError:
            raise ImportError(
                "You need to install ``bigfloat`` package to use {0} object!".
                format(self.__class__.__name__))

        with bigfloat.precision(self.precision):
            return np.array([
                bigfloat.exp(d)
                for d in  # shape = (# of volumes for each configuration, 1)
                logsumexp(-self.static_energies / (K * self.temperature),
                          axis=1,
                          b=self.degeneracies)
            ])
Example #32
    def bigfloat_exp(self, hypothesis):
        return [bf.exp(i) for i in hypothesis]
Example #33
# Deal with overflow in exp using bigfloat
import bigfloat

bigfloat.exp(5000, bigfloat.precision(100))
# -> BigFloat.exact('2.9676283840236670689662968052896e+2171', precision=100)
Example #34
# Deal with overflow in exp using bigfloat
import bigfloat
bigfloat.exp(5000, bigfloat.precision(100))
# -> BigFloat.exact('2.9676283840236670689662968052896e+2171', precision=100)
Example #35
def parallel_ONW_for_bootstrap(arg):
    global temporary_dataset

    numpy.random.seed(arg)

    resampled_indices = numpy.floor(
        numpy.random.rand(len(temporary_dataset)) *
        len(temporary_dataset)).astype(int)
    resampled_dataset = numpy.array(temporary_dataset)[
        resampled_indices, :].tolist()

    (B0_start, E_start, T_pk_start,
     E_D_start) = generate_starting_values(resampled_dataset)

    temps = []
    trait_vals = []

    for row in resampled_dataset:

        # Convert temperatures to Kelvin.
        temps.append(float(row[4]) + 273.15)
        trait_vals.append(log(float(row[5])))

    # Convert temps and trait_vals to numpy arrays.
    temps = numpy.array(temps, dtype=numpy.float64)
    trait_vals = numpy.array(trait_vals, dtype=numpy.float64)

    # Prepare the parameters and their bounds.
    params = Parameters()
    params.add('B0', value=B0_start)
    params.add('E', value=E_start, min=0.00001, max=10)
    params.add('E_D', value=E_D_start, min=0.00001, max=30)
    params.add('T_pk', value=T_pk_start, min=273.15 - 10, max=273.15 + 150)

    try:

        # Try and fit!
        result = minimize(sharpe_schoolf,
                          params,
                          args=(temps, trait_vals),
                          xtol=1e-12,
                          ftol=1e-12,
                          maxfev=100000)

    except Exception:

        # If fitting failed, return None.
        result = None

    if result is not None:

        # In the highly unlikely scenario that E == E_D, add a tiny number to
        # E_D to avoid division by zero.
        if params['E'].value == params['E_D'].value:
            params['E_D'].value += 0.000000000000001

        B_pk = exp(1)**sharpe_schoolf_eq(numpy.array([params['T_pk'].value]),
                                         params['B0'].value, params['E'].value,
                                         params['E_D'].value,
                                         params['T_pk'].value)
        current_operational_niche_width = calculate_operational_niche_width(
            params, B_pk[0], T_ref=273.15)
        return (current_operational_niche_width)
    else:
        return (None)
Example #36
def fit_sharpe_schoolfield(dataset, B0_start, E_start, T_pk_start, E_D_start):
    """Fit the Sharpe-Schoolfield model."""

    global K

    # Store temperatures and log-transformed trait values.
    temps = []
    trait_vals = []

    for row in dataset:

        # Convert temperatures to Kelvin.
        temps.append(float(row[4]) + 273.15)
        trait_vals.append(log(float(row[5])))

    # Convert temps and trait_vals to numpy arrays.
    temps = numpy.array(temps, dtype=numpy.float64)
    trait_vals = numpy.array(trait_vals, dtype=numpy.float64)

    # Prepare the parameters and their bounds.
    params = Parameters()
    params.add('B0', value=B0_start)
    params.add('E', value=E_start, min=0.00001, max=10)
    params.add('E_D', value=E_D_start, min=0.00001, max=30)
    params.add('T_pk', value=T_pk_start, min=273.15 - 10, max=273.15 + 150)

    try:

        # Try and fit!
        result = minimize(sharpe_schoolf,
                          params,
                          args=(temps, trait_vals),
                          xtol=1e-12,
                          ftol=1e-12,
                          maxfev=100000)

    except Exception:

        # If fitting failed, return.
        return None

    # Check if we have a covariance matrix (just in case we don't for any reason).
    if result.covar is None:
        return None

    # Since we're still here, fitting was successful!

    # In the highly unlikely scenario that E == E_D, add a tiny number to
    # E_D to avoid division by zero.
    if params['E'].value == params['E_D'].value:
        params['E_D'].value += 0.000000000000001

    # Calculate B(T_ref), B_pk, and the operational niche width.
    B_T_ref = exp(1)**sharpe_schoolf_eq(numpy.array([273.15]),
                                        params['B0'].value, params['E'].value,
                                        params['E_D'].value,
                                        params['T_pk'].value)

    B_pk = exp(1)**sharpe_schoolf_eq(numpy.array([params['T_pk'].value]),
                                     params['B0'].value, params['E'].value,
                                     params['E_D'].value, params['T_pk'].value)

    operational_niche_width = calculate_operational_niche_width(params,
                                                                B_pk[0],
                                                                T_ref=273.15)

    robjects.r("rm(list=ls())")
    robjects.r.assign("K", K)
    robjects.r.assign("covar_mat", result.covar)
    robjects.r.assign(
        "Schoolf_coeffs",
        robjects.FloatVector((params['B0'].value, params['E'].value,
                              params['E_D'].value, params['T_pk'].value)))

    B_T_ref_stderr = robjects.r(
        "deltamethod( ~ x1 / (1 + (x2/(x3 - x2)) * exp(x3 / K * (1 / x4 - 1 / 273.15))), Schoolf_coeffs, covar_mat)^2"
    )[0]
    B_T_ref_1_4_stderr = robjects.r(
        "deltamethod( ~ (x1 / (1 + (x2/(x3 - x2)) * exp(x3 / K * (1 / x4 - 1 / 273.15))))^(1/4), Schoolf_coeffs, covar_mat)^2"
    )[0]
    log_E_stderr = robjects.r(
        "deltamethod(~ log(x2), Schoolf_coeffs, covar_mat)^2")[0]
    T_pk_squared_stderr = robjects.r(
        "deltamethod(~ x4^2, Schoolf_coeffs, covar_mat)^2")[0]
    log_E_D_stderr = robjects.r(
        "deltamethod(~ log(x3), Schoolf_coeffs, covar_mat)^2")[0]
    B_pk_stderr = robjects.r(
        "deltamethod( ~ (x1 * exp((-x2) * (1/(K * x4) - 1/(K * 273.15)))) / (1 + (x2/(x3 - x2))), Schoolf_coeffs, covar_mat)^2"
    )[0]
    log_B_pk_stderr = robjects.r(
        "deltamethod( ~ log((x1 * exp((-x2) * (1/(K * x4) - 1/(K * 273.15)))) / (1 + (x2/(x3 - x2)))), Schoolf_coeffs, covar_mat)^2"
    )[0]

    (operational_niche_width_stderr,
     log_operational_niche_width_stderr) = bootstrap_operational_niche_width(dataset)

    # Calculate the fitted trait values.
    pred = exp(1)**sharpe_schoolf_eq(temps, params['B0'].value,
                                     params['E'].value, params['E_D'].value,
                                     params['T_pk'].value)
    pred = numpy.array(pred, dtype=numpy.float64)

    # Collect measured trait values without log transformation.
    trait_vals_no_log = []
    for row in dataset:
        trait_vals_no_log.append(float(row[5]))

    # Calculate the residual sum of squares.
    residuals = trait_vals_no_log - pred
    rss = sum(residuals**2)

    # Temperatures ...
    temperatures = ""
    for element in temps:
        temperatures += str(element - 273.15) + ','
    temperatures = temperatures[:-1]

    # Trait Values ...
    trait_values = ""
    for element in trait_vals_no_log:
        trait_values += str(element) + ','
    trait_values = trait_values[:-1]

    # If, for whatever reason, the residual sum of squares is
    # 'not a number' or infinite, then return.
    if numpy.isnan(rss) or numpy.isinf(rss):
        return None

    # Calculate the total sum of squares.
    tss = sum((trait_vals_no_log - numpy.mean(trait_vals_no_log))**2)

    # Calculate the R-squared value.
    if tss == 0:
        fit_goodness = 1
    else:
        fit_goodness = 1 - (rss / tss)

    # Number of data points before and after T_pk ...
    count_temps = []
    points_before_pk = 0
    points_after_pk = 0

    for row in dataset:
        if float(row[4]) in count_temps:
            continue
        else:
            count_temps.append(float(row[4]))

            if float(row[4]) < (params['T_pk'].value - 273.15):
                points_before_pk += 1
            elif float(row[4]) > (params['T_pk'].value - 273.15):
                points_after_pk += 1

    result_line = [
        dataset[0][1], dataset[0][7], dataset[0][8], temperatures,
        trait_values, dataset[0][2],
        str(B_T_ref[0]**(1 / 4.0)),
        str(B_T_ref_1_4_stderr),
        str(log(params['E'].value)),
        str(log_E_stderr),
        str(log(operational_niche_width)),
        str(log_operational_niche_width_stderr),
        str((params['T_pk'].value - 273.15)**2),
        str(T_pk_squared_stderr),
        str(log(B_pk[0])),
        str(log_B_pk_stderr),
        str(log(params['E_D'].value)),
        str(log_E_D_stderr),
        str(fit_goodness),
        str(points_before_pk),
        str(points_after_pk)
    ]

    return result_line
Example #37
def schoolfield_model(dataset, B0_start, E_start, T_pk_start, E_D_start):
    """Prepare the Schoolfield model."""

    global K

    # Store temperatures and log-transformed trait values.
    temps = []
    trait_vals = []

    for row in dataset:

        # Convert temperatures to Kelvin.
        temps.append(float(row[4]) + 273.15)
        trait_vals.append(log(float(row[5])))

    # Convert temps and trait_vals to numpy arrays.
    temps = numpy.array(temps, dtype=numpy.float64)
    trait_vals = numpy.array(trait_vals, dtype=numpy.float64)

    # Prepare the parameters and their bounds.
    params = Parameters()
    params.add('B0', value=B0_start)
    params.add('E', value=E_start, min=0.00001, max=30)
    params.add('E_D', value=E_D_start, min=0.00001, max=50)
    params.add('T_pk', value=T_pk_start, min=273.15, max=273.15 + 150)

    try:

        # Set the random seed.
        numpy.random.seed(1337)

        # Try and fit!
        minimize(schoolf, params, args=(temps, trait_vals))
    except Exception:

        # If fitting failed, return.
        return None, None, None, None

    # Since we're still here, fitting was successful!
    points_before_pk = 0
    points_after_pk = 0

    # Collect measured trait values without log transformation.
    trait_vals_no_log = []
    for row in dataset:
        trait_vals_no_log.append(float(row[5]))
        
        # Get the number of data points before and after T_pk.
        if float(row[4]) < (params['T_pk'].value - 273.15):
            points_before_pk += 1
        elif float(row[4]) > (params['T_pk'].value - 273.15):
            points_after_pk += 1

    # Calculate the estimated trait values.
    pred = exp(1) ** schoolf_eq(temps,
                                params['B0'].value,
                                params['E'].value,
                                params['E_D'].value,
                                params['T_pk'].value)
    pred = numpy.array(pred, dtype=numpy.float64)

    # Calculate the residual sum of squares.
    residuals = trait_vals_no_log - pred
    rss = sum(residuals ** 2)

    # If, for whatever reason, the residual sum of squares is
    # 'not a number' or infinite, then return.
    if numpy.isnan(rss) or numpy.isinf(rss):
        return None, None, None, None

    # Calculate the total sum of squares.
    tss = sum((trait_vals_no_log - numpy.mean(trait_vals_no_log)) ** 2)

    # If the total sum of squares is 0, then we have a perfect fit!
    if tss == 0:
        fit_goodness = 1

    # Otherwise, calculate the R-Squared value.
    else:
        fit_goodness = 1 - (rss / tss)

    # Get the values of the three Information Criteria for this fit.
    AIC_schoolfield = AICrss(len(temps), 4, rss)
    BIC_schoolfield = BICrss(len(temps), 4, rss)
    HQC_schoolfield = HQCrss(len(temps), 4, rss)

    # Create a list of Species, Standardised Species, Reference, Trait,
    # Latitude, Longitude ...
    result_line = [
      dataset[0][0], dataset[0][1], dataset[0][2], dataset[0][3],
      dataset[0][7], dataset[0][8]
    ]

    # Temperatures ...
    temperatures = ""
    for element in temps:
        temperatures += str(element - 273.15) + ','
    temperatures = temperatures[:-1]
    result_line.append(temperatures)

    # Trait Values ...
    trait_values = ""
    for element in trait_vals_no_log:
        trait_values += str(element) + ','
    trait_values = trait_values[:-1]
    result_line.append(trait_values)

    # B0 and B0_stderr ...
    result_line.append(str(params['B0'].value))
    result_line.append(str(params['B0'].stderr))
    
    # E and E_stderr ...
    result_line.append(str(params['E'].value))
    result_line.append(str(params['E'].stderr))

    # T_pk and T_pk_stderr ...
    result_line.append(str(params['T_pk'].value - 273.15))
    result_line.append(str(params['T_pk'].stderr))

    # E_D and E_D_stderr ...
    result_line.append(str(params['E_D'].value))
    result_line.append(str(params['E_D'].stderr))

    # No theta and theta_stderr for this model!
    result_line.append("NA")
    result_line.append("NA")
    
    # R-squared ...
    result_line.append(str(fit_goodness))
    
    # Formula ...
    result_line.append(
       "log(Trait_value) = log(B0 * exp(-E * ((1/(" + str(K) \
        + " * temp)) - (1/(" + str(K) \
        + " * 273.15))))/(1 + (E/(E_D - E)) * exp(E_D/" \
        + str(K) + " * (1/T_pk - 1/temp))))")
    
    # Model name ...
    result_line.append("Schoolfield")
    
    # Implementation ...
    result_line.append("lmfit.minimize (Python package)")
    
    # Number of data points before and after T_pk ...
    result_line.append(str(points_before_pk))
    result_line.append(str(points_after_pk))

    return AIC_schoolfield, BIC_schoolfield, HQC_schoolfield, result_line
Example #38
def H(g, j):
    return exp(j+g+0.5) / (j+g+0.5)**(j+0.5)
Example #39
def gamma(z, g, N):
    return (z+g-0.5)**(z-0.5) / exp(z+g-0.5) * L(g, z, N)
Example #40
def H(g, j):
    return exp(j+g+0.5) / (j+g+0.5)**(j+0.5)
Example #41
    def integrand(self, x):
        return 1.0 - (1.0 - self.lasum(x)
                      / bigfloat.exp(x, bigfloat.precision(100)))**self.n