Example #1
import sympy as sp
from math import log as ln  # natural log, used for the iteration bound below


def secant_method(fun, eps, start, end):
    itr = 0
    x = sp.symbols('x')
    F = sp.lambdify(x, fun)
    i = start
    while (i < end):
        if (F(i) * F(i + 1) <= 0):
            x0 = (i * 2 + 1) / 2
            x1 = i + 1
            xr = x1
            error = (-(ln((eps) / (i + 1 - i)) / ln(2)))

            while ((abs(x1 - x0) > eps) and (error > itr)):
                if (F(x1) - F(x0)) == 0:
                    print("cant divided by zero")
                    return

                temp = xr
                xr = (x0 * F(x1) - x1 * F(x0)) / (F(x1) - F(x0))
                x1 = xr
                x0 = temp

                itr = itr + 1
            print("itr", itr, "root:", xr)
            itr = 0
        i = i + 1
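
A minimal usage sketch (the expression, interval, and tolerance are illustrative, not from the original source):

import sympy as sp

x = sp.symbols('x')
# Scan [0, 3] for sign changes of x**2 - 4 and refine each bracketed root to within 1e-6.
secant_method(x**2 - 4, 1e-6, 0, 3)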
Example #2
import sympy as sp
from math import log as ln  # natural log, used for the iteration bound below


def newtonraphson(fun, eps, start, end):
    itr = 0
    x = sp.symbols('x')
    F = sp.lambdify(x, fun)
    f = sp.lambdify(x, sp.diff(fun, x))  # callable derivative of fun

    i = start
    while (i < end):
        if (F(i) * F(i + 1) <= 0):
            x0 = (i * 2 + 1) / 2
            xr = i + 1
            error = (-(ln((eps) / (i + 1 - i)) / ln(2)))

            while ((abs(xr - x0) > eps) and (error > itr)):
                if f(x0) == 0:
                    print("cant divided by zero")
                    return

                temp = xr
                xr = x0 - (F(x0) / f(x0))
                x0 = temp
                itr = itr + 1
            print("itr", itr, "root:", xr)
            itr = 0
        i = i + 1
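
A minimal usage sketch mirroring the one above (expression, interval, and tolerance are again illustrative):

import sympy as sp

x = sp.symbols('x')
# Scan [0, 3] for sign changes of x**2 - 4 and polish each bracketed root with Newton-Raphson.
newtonraphson(x**2 - 4, 1e-6, 0, 3)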
Example #3
 def gen_iterations(self, num_of_iters, accuracy=0, exec_finalize=True, print_calc=False):
     """Resets the current state to 0 iterations and generates 'num_of_iters' iterations FROM SCRATCH.
     If the approach type is evaluated and supported, and less then 'num_of_iters' iterations are required to
     achieve an error of at most 'accuracy', if 'accuracy' isn't 0.
     If 'exec_finalize' is True and self.finalize_iterations method exists, it is being invoked at the end."""
     # Calculates the required number of iterations if required, for fast convergence.
     if self._approach_type in ['exp', 'super_exp'] and self._approach_params and accuracy:
         power_base, coeff = self._approach_params
         required_num_of_iters = math.ceil(abs(ln(accuracy / power_base) / ln(power_base)))
         num_of_iters = min(num_of_iters, required_num_of_iters)
     super().gen_iterations(num_of_iters, self.iteration_algorithm, exec_finalize, print_calc=print_calc)
Example #4
 def __init__(self, norm, L, N=2, dps=2048 * 4, initial_k=1):
     with workdps(dps):
         param = mpf('1.0')
         self.L = L
         self.N = 2
         self.gamma = mpf(gamma_estimate(N))
         self.m = mpf(self.N)
         self.norm = norm
         self.dps = dps
         self.eps_coeff = (self.gamma - 3) / (self.gamma - 1)
         self.sconst = (param * mpf('0.5') * (self.gamma - 3) /
                        (self.gamma - 1))**(mp.ln(2) / mp.ln(self.gamma))
         self.initial_k = initial_k
Example #5
def exponential_integral_inverse(p):
    initial = None

    if p > 3.5:
        initial = p * mpmath.ln(p)
    elif 0.75 < p <= 3.5:
        initial = p + 1
    elif -0.5 < p <= 0.75:
        initial = 1.45137 + (p * 0.37251)
    elif -43.8 < p <= -0.5:
        initial = 1 + mpmath.e**(p + mpmath.euler)
    else:
        return 1

    return mpmath.ln(halleys_method.solve_fx(initial, p))
Example #6
	def __ln_expansion(self,u,n_iter = 50):
		from mpmath import ln, pi, sin, exp, mpc
		# TODO: test for pathological cases?
		om1, om3 = self.periods[0]/2, self.periods[1]/2
		assert(u.real < 2*om1 and u.real >= 0)
		assert(u.imag < 2*om3.imag and u.imag >= 0)
		tau = om3/om1
		q = exp(mpc(0,1)*pi()*tau)
		eta1 = self.zeta(om1)
		om1_2 = om1 * 2
		retval = ln(om1_2/pi()) + eta1*u**2/(om1_2) + ln(sin(pi()*u/(om1_2)))
		for r in range(1,n_iter + 1):
			q_2 = q**(2*r)
			retval += q_2/(r * (1 - q_2))*(2. * sin(r*pi()*u/(om1_2)))**2
		return retval
Example #7
from math import log as ln  # natural log, used for the iteration bound


def Bisection_Method(start_point, end_point, Ep, p):
    error = -(ln(Ep / (end_point - start_point)) / ln(2))
    count=0
    while (end_point-start_point)>Ep :
        if (count > error):
            print("this function dosent fit the Bisection Method ")
            break

        count=count+1
        c=(start_point+end_point)/2
        if ((p(start_point)*p(c))>0):
            start_point=c
        else:
            end_point=c
    return c
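
A minimal usage sketch (the function, interval, and tolerance are illustrative; p is any callable):

from math import cos

# Bisect cos(x) on [1, 2] down to an interval width of 1e-6.
root = Bisection_Method(1, 2, 1e-6, cos)
print(root)  # approximately pi/2 ~ 1.5708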
Example #8
def ln_expansion2(wp, u, n_iter=25):
    from mpmath import ln, pi, sin, exp, mpc
    # TODO: test for pathological cases?
    om1, om3 = wp.periods[0] / 2, wp.periods[1] / 2
    assert (u.real < 2 * om1 and u.real >= 0)
    assert (abs(u.imag) < 2 * om3.imag)
    tau = om3 / om1
    q = exp(mpc(0, 1) * pi() * tau)
    eta1 = wp.zeta(om1)
    om1_2 = om1 * 2
    retval = ln(om1_2 / pi()) + eta1 * u**2 / (om1_2) + ln(
        sin(pi() * u / (om1_2)))
    for r in range(1, n_iter + 1):
        q_2 = q**(2 * r)
        retval += q_2 / (r * (1 - q_2)) * (2. * sin(r * pi() * u / (om1_2)))**2
    return retval
Example #9
 def draw_inverse_transform(self, curr_time):
     U = random.uniform(0,1)
     while U == 0:
         U = random.uniform(0,1)
     draw = ((-(self.scale**self.shape)*ln(U)+((curr_time)**self.shape))**(1/self.shape) - (curr_time))
     
     return abs(draw)
Example #10
    def log_likelihood(params, data, nonzero_only=False):
        """
        Calculates the log-likelihood on the data.

        :param params: two-element list with the location (mu) and shape (sigma)
        parameters.
        :param data: input data as a numpy array.
        :param nonzero_only: whether only nonzero elements should be considered. This is
        used after the parameters have been determined, when comparing to distributions
        that ignore zero values.
        :return: log-likelihood.
        """
        if params[0] < co.EPSILON:
            return co.delta.log_likelihood([0], data)
        elif params[1] < co.EPSILON:
            return co.delta.log_likelihood([params[0]], data)
        else:
            if nonzero_only:
                _samples = data[np.where(data > 0)]
            else:
                _samples = data
            x = np.arange(0, co.DEFAULT_PDF_MAX+1)
            c = np.sum(np.exp(-0.5*np.power((x-params[0])/params[1], 2)))
            return - np.sum(np.power(_samples-params[0], 2))/(2*params[1]**2)\
                - len(_samples)*ln(c)
Example #11
    def log_likelihood(params, data, nonzero_only=False):
        """
        Calculates the log-likelihood on the data.

        :param params: two-element list with the location (mu) and shape (sigma)
        parameters.
        :param data: input data as a numpy array.
        :param nonzero_only: unused.
        :return: log-likelihood.
        """
        """
        if nonzero_only:
            nonzero_samples = np.where(data > 0, data, co.EPSILON)
        else:
            nonzero_samples = data[np.where(data > 0)]
        if params[0] < 0:
            return co.delta.log_likelihood(0, data)
        if params[1] < co.EPSILON:
            return co.delta.log_likelihood(exp(params[0]), data)
        else:
            return -np.sum(np.log(nonzero_samples))\
                - np.sum(np.power(np.log(nonzero_samples)-params[0], 2))/(2*params[1]**2)\
                - len(data)*ln(params[1]*sqrt(2*np.pi))
        """  # FIXME continuous log-likelihood
        nonzero_samples = data[np.where(data > 0)]
        if params[0] < co.EPSILON or params[1] < co.EPSILON:
            return co.delta.log_likelihood([0], data)
        else:
            x = np.arange(1, co.DEFAULT_PDF_MAX+1)
            c = np.sum(np.exp(-0.5*np.power((np.log(x)-params[0])/params[1], 2))/x)
            return -np.sum(np.log(nonzero_samples))\
                - np.sum(np.power(np.log(nonzero_samples)-params[0], 2))/(2*params[1]**2)\
                - len(data)*ln(c)
Example #12
def ln_expansion(wp,u,n_iter = 50):
	from mpmath import ln, pi, sin, exp, mpc
	# TODO: test for pathological cases?
	if wp.periods[1].real == 0:
		om1, om3 = wp.periods[0]/2, wp.periods[1]/2
	else:
		om1, om3 = wp.periods[1].conjugate()/2, wp.periods[1]/2
	tau = om3/om1
	q = exp(mpc(0,1)*pi()*tau)
	eta1 = wp.zeta(om1)
	om1_2 = om1 * 2
	retval = ln(om1_2/pi()) + eta1*u**2/(om1_2) + ln(sin(pi()*u/(om1_2)))
	for r in range(1,n_iter + 1):
		q_2 = q**(2*r)
		retval += q_2/(r * (1 - q_2))*(2. * sin(r*pi()*u/(om1_2)))**2
	return retval
Example #13
def BSLaplace(S, K, T, t, r, sig, N, phi):
    """Solving the Black Scholes PDE in the Laplace domain"""
    x = ln(S / K)
    r = mpf(r)
    sig = mpf(sig)
    T = mpf(T)
    t = mpf(t)
    S = mpf(S)
    K = mpf(K)
    x = mpf(x)
    mu = r - 0.5 * (sig**2)

    tau = T - t
    c1 = mpf('0.5017')
    c2 = mpf('0.6407')
    c3 = mpf('0.6122')
    c4 = mpc('0', '0.2645')

    ans = 0.0
    h = 2 * pi / N
    h = mpf(h)
    for k in range(N // 2):  # Use symmetry (integer division for Python 3)
        theta = -pi + (k + 0.5) * h
        z = N / tau * (c1 * theta / tan(c2 * theta) - c3 + c4 * theta)
        dz = N / tau * (-c1 * c2 * theta /
                        (sin(c2 * theta)**2) + c1 / tan(c2 * theta) + c4)
        eps1 = (-mu + sqrt(mu**2 + 2 * (sig**2) * (z + r))) / (sig**2)
        eps2 = (-mu - sqrt(mu**2 + 2 * (sig**2) * (z + r))) / (sig**2)
        b1 = 1 / (eps1 - eps2) * (eps2 / (z + r) + (1 - eps2) / z)
        b2 = 1 / (eps1 - eps2) * (eps1 / (z + r) + (1 - eps1) / z)
        ans += exp(z * tau) * bs(x, b1, b2, eps1, eps2, z, r, phi) * dz
        val = (K * (h / (2j * pi) * ans)).real

    return 2 * val
Example #14
    def Weyl_law_N(self,T,T1=None):
        r"""
        The counting function for this space. N(T)=#{disc. ev.<=T}
        
        INPUT:
        
        -  ``T`` -- double


        EXAMPLES::

            sage: M=MaassWaveForms(MySubgroup(Gamma0(1)))
            sage: M.Weyl_law_N(10)
            0.572841337202191
            
        """
        (c1,c2,c3,c4,c5)=self._Weyl_law_const
        cc1=RR(c1); cc2=RR(c2); cc3=RR(c3); cc4=RR(c4); cc5=RR(c5)
        #print "c1,c2,c3,c4,c5=",cc1,cc2,cc3,cc4,cc5
        t=sqrt(T*T+0.25)
        try: 
            lnt=ln(t)
        except TypeError:
            lnt=mpmath.ln(t)
        #print "t,ln(t)=",t,lnt
        NT=cc1*t*t-cc2*t*lnt+cc3*t+cc4*t+cc5
        if T1 is not None:
            t=sqrt(T1*T1+0.25)
            NT1=cc1*(T1*T1+0.25)-cc2*t*ln(t)+cc3*t+cc4*t+cc5
            return RR(abs(NT1-NT))
        else:
            return RR(NT)
Example #15
    def log_likelihood(params, data, nonzero_only=False):
        """
        Calculates the log-likelihood on the data.

        :param params: two-element list containing the exponent (gamma) and shift (x0).
        :param data: input data as a numpy array.
        :param nonzero_only: whether only nonzero elements should be considered. This is
        used after the parameters have been determined, when comparing to distributions
        that ignore zero values.
        :return: log-likelihood.
        """
        if params[0] < co.EPSILON:
            if params[1] < co.EPSILON:
                return co.delta.log_likelihood([0], data)
            else:
                return co.uniform.log_likelihood(None, data)
        else:
            c = float(zeta(params[0], params[1]))
            if c < co.EPSILON:
                return co.delta.log_likelihood([0], data)
            else:
                if nonzero_only:
                    _samples = data[np.where(data > 0)]
                else:
                    _samples = data
                return -params[0]*np.sum(np.log(_samples+params[1])) - len(_samples)*ln(c)
Example #16
def BSLaplace(S,K,T,t,r,sig,N,phi): 
        """Solving the Black Scholes PDE in the Laplace domain"""
        x   = ln(S/K)     
        r = mpf(r);sig = mpf(sig);T = mpf(T);t=mpf(t)
        S = mpf(S);K = mpf(K);x=mpf(x)
        mu  = r - 0.5*(sig**2)
       
        tau = T - t   
        c1 = mpf('0.5017')
        c2 = mpf('0.6407')
        c3 = mpf('0.6122')
        c4 = mpc('0','0.2645')        
        
        ans = 0.0
        h = 2*pi/N
        h = mpf(h)
        for k in range(N//2): # Use symmetry (integer division for Python 3)
            theta = -pi + (k+0.5)*h
            z     =  N/tau*(c1*theta/tan(c2*theta) - c3 + c4*theta)
            dz    =  N/tau*(-c1*c2*theta/(sin(c2*theta)**2) + c1/tan(c2*theta)+c4)
            eps1  =  (-mu + sqrt(mu**2 + 2*(sig**2)*(z+r)))/(sig**2)
            eps2  =  (-mu - sqrt(mu**2 + 2*(sig**2)*(z+r)))/(sig**2)
            b1    =  1/(eps1-eps2)*(eps2/(z+r) + (1 - eps2)/z)
            b2    =  1/(eps1-eps2)*(eps1/(z+r) + (1 - eps1)/z)
            ans  +=  exp(z*tau)*bs(x,b1,b2,eps1,eps2,z,r,phi)*dz
            val = (K*(h/(2j*pi)*ans)).real
           
            
        return 2*val
Example #17
def ln_expansion(wp, u, n_iter=50):
    from mpmath import ln, pi, sin, exp, mpc
    # TODO: test for pathological cases?
    if wp.periods[1].real == 0:
        om1, om3 = wp.periods[0] / 2, wp.periods[1] / 2
    else:
        om1, om3 = wp.periods[1].conjugate() / 2, wp.periods[1] / 2
    tau = om3 / om1
    q = exp(mpc(0, 1) * pi() * tau)
    eta1 = wp.zeta(om1)
    om1_2 = om1 * 2
    retval = ln(om1_2 / pi()) + eta1 * u**2 / (om1_2) + ln(
        sin(pi() * u / (om1_2)))
    for r in range(1, n_iter + 1):
        q_2 = q**(2 * r)
        retval += q_2 / (r * (1 - q_2)) * (2. * sin(r * pi() * u / (om1_2)))**2
    return retval
Example #18
    def draw_inverse_transform(self, curr_time):
        U = random.uniform(0, 1)
        while U == 0:
            U = random.uniform(0, 1)
        draw = ((-(self.scale**self.shape) * ln(U) +
                 ((curr_time)**self.shape))**(1 / self.shape) - (curr_time))

        return abs(draw)
Example #19
    def log_likelihood(params, data):
        """
        Returns the log-likelihood of a uniform distribution.

        :param params: unused.
        :param data: the data over which the log-likelihood should be calculated.
        :return: log-likelihood.
        """
        return -len(data) * ln(float(np.max(data)))
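
A quick sanity check, treating the snippet above as a standalone function (the data are illustrative):

import numpy as np
from math import log as ln

data = np.array([1, 4, 2, 8])
print(log_likelihood(None, data))  # -4 * ln(8) ~ -8.318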
Example #20
class _FreeEnergyCalculator:
    constants: dict
    equation: sp.Function
    simplified_equation: sp.Function

    exp = np.vectorize(lambda x: mp.exp(x))  # this function deals with decimals to
    ln = lambda self, x: mp.ln(x)
    fermifunc = np.vectorize(lambda x, beta: 1 / (1 + mp.exp(beta * x)))

    J_to_cal: float = 0.239005736
    k, T, Vi, Vj = sp.symbols("k T, Vi, Vj")

    def __init__(self):
        pass

    def __str__(self):
        msg = ""
        msg += self.__class__.__name__ + "\n"
        msg += "\tEquation: " + str(self.equation) + "\n"
        msg += (
            "\tConstants:\n\t\t"
            + "\n\t\t".join([str(key) + ":\t" + str(value) for key, value in self.constants.items()])
            + "\n"
        )
        msg += "\tsimplified Equation: " + str(self.simplified_equation) + "\n"
        return msg

    def calculate(self, Vi: (Iterable[Number], Number), Vj: (Iterable[Number], Number)) -> float:
        raise NotImplementedError("This Function needs to be Implemented")

    def _update_function(self):
        self.simplified_equation = self.equation.subs(self.constants)

    @classmethod
    def _prepare_type(self, *arrays):
        return tuple(map(lambda arr: np.array(list(map(lambda x: np.float(x), arr)), ndmin=1), arrays))

    @classmethod
    def get_equation(cls) -> sp.Function:
        """
        get_equation returns the symbolic equation.
        :return: symbolic implementation of Zwanzig
        :rtype: sp.Function
        """
        return cls.equation

    @classmethod
    def get_equation_simplified(cls) -> sp.Function:
        cls._update_function()
        return cls.simplified_equation

    def set_parameters(self):
        """
        set_parameters setter for the parameter
        """
        raise NotImplementedError("This function needs to be implemented")
Example #21
    def log_likelihood(params, data):
        """
        Returns the log-likelihood of a delta distribution.
        The distribution is approximated by a narrow Gaussian.

        :param params: single element list with the location parameter.
        :param data: the data over which the log-likelihood should be calculated.
        :return: log-likelihood.
        """
        return -len(data)*ln(EPSILON*sqrt(2*np.pi)) - 0.5*np.sum(0.5*np.power(data-params[0], 2))/EPSILON**2
Example #22
def exp_fixpoint(b=e,k=1,prec=53,iprec=None):
    """
    Counting fixpoints as follows:

    For b<=e^(1/e): 
      0 denotes the lower fixpoint on the real axis,
      1 denotes the upper fixed point on the real axis,
      2 denotes the fixpoint in the upper halfplane closest to the real axis, 
      3 the second-closest, etc

    For b>e^(1/e): 
      1 denotes the fixpoint in the upper halfplane closest to the real axis,
      2 the second-closest fixed point, etc.

    Or in other words order the repelling fixed points of the upper halfplane 
    by their distance from the real axis, give the closest fixed point the number 1.
    The attracting fixed point (existent only for b<e**(1/e)) gets index 0.

    Fixpoint k mirrored into the lower halfplane gets index -k.
    """
    if iprec==None:
        iprec=prec+10

    b=num(b,iprec)

    if k==0:
        assert b <= e**(1/e), "b must be <= e**(1/e) for fixpoint number 0, but b=" + repr(b)
    if k>=0:
        branch = -k
    elif b <= e**(1/e) and k==-1:
        branch = -1
    else:
        branch = -k-1

    mpmath.mp.prec = iprec
    fp = mpmath.lambertw(-mpmath.ln(b),branch)/(-mpmath.ln(b))
    if type(fp) == sage.libs.mpmath.ext_main.mpf:
      return num(fp,prec)
    return ComplexField(prec)(fp.real,fp.imag)
Example #23
def bic_measure(log_likelihood, params_num, sample_size):
    """
    Returns the Bayesian information criterion value, that is
    BIC = -2ln(L) + m*ln(n),
    where L is the likelihood in the optimum, m and n are the number of model parameters
    and the sample size.

    :param log_likelihood: optimized log-likelihood.
    :param params_num: number of model parameters.
    :param sample_size: sample size.
    :return: BIC value.
    """
    return -2*log_likelihood + params_num*ln(sample_size)
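
An illustrative call (the numbers are made up): a fit with log-likelihood -120.5, two parameters, and 1000 samples.

from math import log as ln

print(bic_measure(-120.5, 2, 1000))  # -2*(-120.5) + 2*ln(1000) ~ 254.8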
Example #24
    def _calc_bar_mpmath(self, C: Number, Vj_i: np.array, Vi_i: np.array, Vi_j: np.array, Vj_j: np.array) -> Number:
        """
            _calc_bar
                this function calculates the free energy difference of two states for one iteration of the BAR method.
                It is implemented in a straightforward way and is therefore not very numerically stable.


        Parameters
        ----------
        Vi_i : np.array
            potential energies of stateI while sampling stateI
        Vj_i : np.array
             potential energies of stateJ while sampling stateI
        Vi_j : np.array
             potential energies of stateI while sampling stateJ
        Vj_j : np.array
             potential energies of stateJ while sampling stateJ

        Returns
        -------
        float
            free energy difference

        """
        # Calculate the potential energy difference in reduced units of kT
        dV_j = (Vi_j - Vj_j) + C
        dV_i = (Vj_i - Vi_i) - C

        # Exponentiate to obtain fermi(-Delta U/kT)

        try:
            ferm_dV_j = self.fermifunc(dV_j,self.constants[self.beta])
            ferm_dV_i = self.fermifunc(dV_i,self.constants[self.beta])
        except OverflowError:
            raise OverflowError(
                "Zwanzig Error: Overflow in exponentiation of potential energy difference. Aborting calculation.")

        # get average
        mean_edV_j = np.mean(ferm_dV_j)
        mean_edV_i = np.mean(ferm_dV_i)

        # Return free energy difference
        try:
            ddF = (1 / self.constants[self.beta]) * mp.ln(mean_edV_j / mean_edV_i)
        except ValueError as err:
            raise ValueError(
                "BAR Error: Problems taking logarithm of the average exponentiated potential energy difference " + str(
                    err.args))

        return np.float(ddF + C)
Example #25
def normal_cdf_ln2(x, y, r):
    # Some bad cases present in test data that were computed separately,
    # because the routine here can not provide sufficient accuracy
    if x == Float('-0.299999999999999988897769753748434595763683319091796875') and y == Float('-0.299999999999999988897769753748434595763683319091796875') and r == Float('-0.99999899999999997124433548378874547779560089111328125'):
        return Float('-90020.4997847132695760821045836463571450496491084868602301611')
    if x == Float('-0.1000000000000000055511151231257827021181583404541015625') and y == Float('-0.1000000000000000055511151231257827021181583404541015625'):
        if r == Float('-0.99999899999999997124433548378874547779560089111328125'):
            return Float('-10018.3026957438325237319456255040365689256268122985682785824')
        if r == Float('-0.99999000000000004551026222543441690504550933837890625'):
            return Float('-1014.85016355878529115329386335426754801497185281882358946348')
        if r == Float('-0.9999000000000000110134124042815528810024261474609375'):
            return Float('-111.409512020775362450645082754211442935050576861888726532')
    mpmath.mp.dps = 500
    result = mpmath.ln(mpmath_normal_cdf2(to_mpmath(x), to_mpmath(y), to_mpmath(r)))
    return Float(result)
Example #26
    def log_likelihood(params, data, nonzero_only=False):
        """
        Calculates the log-likelihood on the data.
        The factorial is approximated by using Stirling's formula.

        :param params: a one-element list containing the shape (lambda) parameter.
        :param data: input data as a numpy array.
        :param nonzero_only: whether only nonzero elements should be considered. This is
        used after the parameters have been determined, when comparing to distributions
        that ignore zero values.
        :return: log-likelihood.
        """
        nonzero_samples = data[np.where(data > 0)]
        if params[0] < co.EPSILON:
            return co.delta.log_likelihood([0], data)
        else:
            if nonzero_only:
                _samples = data[np.where(data > 0)]
            else:
                _samples = data
            return np.sum(_samples)*ln(params[0])\
                - len(_samples)*params[0]\
                - 0.5*len(_samples)*ln(2*np.pi) - np.sum((0.5+nonzero_samples)*np.log(nonzero_samples)-nonzero_samples)\
                - np.sum(np.log(1+1/(12*nonzero_samples)+1/(288*np.power(nonzero_samples, 2))))
Example #27
    def func(self, x):
        lhs_const = -ln(self.curr_uniform_variate)

        for a in self.curr_avail_devices:
            lhs_const += ((self.get_clock_value(self.components[a].read_clock(),self.components[a].component_fail_distr.location) ** self.fail_shape) / -self.fail_scale_to_shape)
        for f in self.curr_failed_devices:
            lhs_const += ((self.get_clock_value(self.components[f].read_repair_clock(),self.components[f].component_repair_distr.location) ** self.repair_shape) / -self.repair_scale_to_shape)
        
        rhs_var=0

        for a in self.curr_avail_devices:
            rhs_var += (((x+self.get_clock_value(self.components[a].read_clock(),self.components[a].component_fail_distr.location))**self.fail_shape) / -self.fail_scale_to_shape)
        for f in self.curr_failed_devices:
            rhs_var += (((x+self.get_clock_value(self.components[f].read_repair_clock(),self.components[f].component_repair_distr.location))**self.repair_shape) / -self.repair_scale_to_shape)
        
        
        return rhs_var-lhs_const
Example #28
    def log_likelihood(params, data, nonzero=False):
        """
        Calculates the log-likelihood of the Weibull distribution on the data.

        :param params: two-element list containing the shape (k) and scale (lambda)
        parameters.
        :param data: input data as a numpy array.
        :param nonzero: unused.
        :return: log-likelihood.
        """
        nonzero_samples = data[np.where(data > 0)]
        if params[0] < co.EPSILON or params[1] < co.EPSILON:
            return co.delta.log_likelihood([0], data)
        else:
            x = np.arange(1, co.DEFAULT_PDF_MAX+1)
            c = np.sum(np.power(x, params[0]-1)*np.exp(-np.power(x/params[1], params[0])))
            return (params[0]-1) * np.sum(np.log(nonzero_samples))\
                - 1/params[1]**params[0] * np.sum(np.power(data, params[0]))\
                - len(data) * ln(c)
Example #29
    def log_likelihood(params, data, nonzero_only=False):
        """
        Calculates the log-likelihood on the data.

        :param params: single-element list containing the scale (beta) parameter.
        :param data: input data as a numpy array.
        :param nonzero_only: whether only nonzero elements should be considered. This is
        used after the parameters have been determined, when comparing to distributions
        that ignore zero values.
        :return: log-likelihood.
        """
        if params[0] < co.EPSILON:
            return co.delta.log_likelihood([0], data)
        else:
            if nonzero_only:
                _samples = data[np.where(data > 0)]
            else:
                _samples = data
            return len(_samples)*ln(1-exp(-1/params[0])) - np.sum(_samples)/params[0]
Example #30
    def _calculate_mpmath(self, Vi: (Iterable, Number), Vj: (Iterable, Number)) -> float:
        """
        Implementation of the Zwanzig equation with the mpmath package: another robust variant,
        and one that stays very close to the original equation thanks to mpmath's arbitrary precision.
        $dF = -\frac{1}{\beta} \ln \langle e^{-\beta (V_j - V_i)} \rangle$
        Parameters
        ----------
        Vi : np.array
            Potential energies of state I
        Vj : np.array
            Potential energies of state J
        Returns
        -------
        float
            free energy difference
        """
        beta = np.float(1 / (self.constants[self.k] * self.constants[self.T]))

        return -(1 / beta) * np.float(
            mp.ln(np.mean(list(map(mp.exp, -beta * (np.array(Vj, ndmin=1) - np.array(Vi, ndmin=1))))))
        )
Example #31
def rssi2distanceBook(rssi, txpower=TXPOWER, n=NCONSTANT):
    exp = rssi / 10
    mW = pow(10, exp)

    # Apply equation 2.3 from the book
    d0 = 1
    Pij = mW
    P0 = pow(10, TXPOWER / 10)
    # np lies in the range [2, 4]; low values are used in open environments or ones with
    # little power loss.
    np = 2
    # sigma is the standard deviation of the normally distributed random variable that models
    # the random shadowing effect caused by the various obstacles.
    sigma = 18.75
    mu = 10 / mp.ln(10)
    print(Pij, P0, np, sigma, mu, mp.e)

    # Now assemble the formula
    distancia = d0
    distancia *= pow(Pij / P0, -1 / np)
    distancia *= pow(mp.e, -(sigma * sigma) / (2 * mu * mu * np * np))
    return distancia
Example #32
def product_term(
    n,
    electron_charge,
    positron_charge,
    vacuum_permativity,
    electron_number_density,
    ionisation_potential,
    initial_energy,
):
    a = (electron_number_density * (positron_charge ** 2) * (electron_charge ** 4)) / \
        (8 * mpmath.pi * (vacuum_permativity ** 2))
    b = 4 / ionisation_potential

    constant_of_integration = mpmath.ei(
        2 * mpmath.ln(b * initial_energy)) / (a * (b**2))
    exp_integral_inv = exponential_integral_inverse(
        a * (b**2) * (constant_of_integration - n))
    denominator = mpmath.e**(exp_integral_inv / 2)
    energy_proportionality_correction = mpmath.sqrt(
        ELECTRON_MASS / (2 * (electron_number_density**2)))

    return 1 - (mpmath.sqrt(
        b / denominator)) / energy_proportionality_correction
Example #33
def calculateBlackHoleEntropyOperator( measurement ):
    validUnitTypes = [
        [ 'mass' ],
        [ 'length' ],
        [ 'acceleration' ],
        [ 'area' ],
        [ 'temperature' ],
        [ 'power' ],
        [ 'tidal_force' ],
        [ 'time' ],
    ]

    arguments = matchUnitTypes( [ measurement ], validUnitTypes )

    if not arguments:
        raise ValueError( 'black_hole_entropy: invalid argument' )

    mass = calculateBlackHoleMass( measurement )

    entropy = divide( getProduct( [ getPower( mass, 2 ), 4, pi, getConstant( 'newton_constant' ) ] ),
                      getProduct( [ getConstant( 'reduced_planck_constant' ),
                                    getConstant( 'speed_of_light' ), ln( 10.0 ) ] ) )

    return getConstant( 'boltzmann_constant' ).multiply( entropy ).convert( 'bit' )
Example #34
                           None]],  # radian -> degree
 'radians': ['primitive', [lambda x, y: mp.radians(x),
                           None]],  # degree -> radian
 #
 'exp': ['primitive', [lambda x, y: mp.exp(x), None]],
 'expj': ['primitive', [lambda x, y: mp.expj(x), None]],  #exp(x*i) 
 'expjpi': ['primitive', [lambda x, y: mp.expjpi(x), None]],  #exp(x*i*pi)
 'expm1': ['primitive', [lambda x, y: mp.expm1(x), None]],  #exp(x)-1
 'power': ['primitive', [lambda x, y: mp.power(x, y[0]), None]],
 'powm1': ['primitive', [lambda x, y: mp.powm1(x, y[0]),
                         None]],  #pow(x, y) - 1 
 'log': [
     'primitive',
     [lambda x, y: mp.log(x) if y is None else mp.log(x, y[0]), None]
 ],
 'ln': ['primitive', [lambda x, y: mp.ln(x), None]],
 'log10': ['primitive', [lambda x, y: mp.log10(x), None]],
 #
 'lambertw': [
     'primitive',
     [
         lambda x, y: mp.lambertw(x)
         if y is None else mp.lambertw(x, y[0]), None
     ]
 ],
 'agm': [
     'primitive',
     [lambda x, y: mp.agm(x) if y is None else mp.agm(x, y[0]), None]
 ],
 #
 'matrix': [
Example #35
for i in range(0, len(x_ij)):
    total = 0
    p_ij.append([])
    for j in x_ij[i]:
        total = total + j
    # print(total)
    for j in x_ij[i]:
        p = j / total
        p_ij[i].append(p)

print(p_ij)
# print(len(p_ij[0]))
print('============================')
# Entropy value of the j-th indicator

k = 1 / ln(len(p_ij[0]))
# print(k)

for i in range(0, len(p_ij)):
    for j in range(0, len(p_ij[i])):
        p_ij[i][j] += 0.00000001

# print(p_ij)
e_j = []
for i in range(0, len(p_ij)):
    e = 0
    for j in p_ij[i]:
        e = e + (j * ln(j))
    e = -k * e
    # print(e)
    # print('=================================')
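
For reference, the loop above computes the entropy of each indicator in the entropy-weight method,
e = -(1/ln(n)) * sum(p * ln(p)), taken over that indicator's normalized proportions p, with
n = len(p_ij[0]) the number of entries per indicator; the snippet ends before the entropies are
collected into e_j and converted into weights.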
Example #36
def eval_ln(term, value):
    connect = term[3:len(term) - 1]
    return ln(eval_success(connect, value))
Example #37
def getLog( n ):
    return ln( n )
Example #38
def dF(x):
    result = (-1 * m.sin(x) * m.ln(x)) + (m.cos(x) / x) + (m.exp(x) / 5)
    return result
Example #39
def von_Mises_entropy_fraction(kappa):
    I0_kappa = mpmath.besseli(0,kappa)
    I1_kappa = mpmath.besseli(1,kappa)
    kappa_entropy = mpmath.ln(2*mpmath.pi*I0_kappa) - kappa*(I1_kappa/I0_kappa)
    return 1-(kappa_entropy/von_Mises_entropy_max())
Example #40
def getLogOperator(n):
    return ln(n)
Example #41
 def _comp_alfa(self, eps_exp):
     eps = mpm.power(2, eps_exp)
     alfa = mpm.sqrt(mpm.ln(1 / eps) / mpm.pi)
     alfa = self._round_up(alfa, 3)
     return alfa
Example #42
def dimension(scale, copies):
    return mpmath.ln(copies) / mpmath.ln(scale)
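
For example, the Sierpinski triangle consists of 3 copies of itself, each scaled down by a factor of 2,
so a quick sanity check of the helper above gives its similarity dimension:

import mpmath

print(dimension(2, 3))  # ln(3)/ln(2) ~ 1.585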
Example #43
        energetic barrier between the inactive and the active state.
    logC : Bool.
        boolean indicating if the concentration is given in log scale

    Returns
    -------
    p_act : float.
        The probability of the repressor being in the active state.
    '''
    return (1 + R / Nns * p_act(C, ka, ki, epsilon, logC) * np.exp(-eRA))**-1


# TWO-STATE PROMOTER
# define a np.frompyfunc that allows us to evaluate mpmath.hyp1f1 elementwise
np_log_hyp = np.frompyfunc(lambda x, y, z:
                           mpmath.ln(mpmath.hyp1f1(x, y, z, zeroprec=1000)), 3, 1)


def log_p_m_unreg(mRNA, kp_on, kp_off, gm, rm):
    '''
    Computes the log probability lnP(m) for an unregulated promoter,
    i.e. the probability of having m mRNA.

    Parameters
    ----------
    mRNA : float.
        mRNA copy number at which evaluate the probability.
    kp_on : float.
        rate of activation of the promoter in the chemical master equation
    kp_off : float.
        rate of deactivation of the promoter in the chemical master equation
Example #44
def f(x):
    # (4 * (x ** 2))
    result = (m.cos(x) * m.ln(x)) + (m.exp(x) / 5)
    return result
Example #45
def von_Mises_entropy_max():
    return mpmath.ln(2*mpmath.pi)
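
A quick sanity check combining this with von_Mises_entropy_fraction from Example #39 (the kappa values are illustrative):

import mpmath

print(von_Mises_entropy_fraction(0))  # 0: the uniform circular distribution has maximal entropy
print(von_Mises_entropy_fraction(5))  # larger for a more concentrated distribution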