def calc_model_evidence(self):
    vval = 0
    mp.mp.dps = 50  # high precision; the evidence terms are accumulated in log space to avoid overflow
    for action in range(self.hparams.num_actions):
      # accumulate the log of this action's evidence term, then exponentiate once
      val = mp.fmul(mp.fneg(mp.log(mp.fmul(2.0, mp.pi))), mp.fsub(self.a[action], self.a0))
      val += mp.loggamma(self.a[action])
      val -= mp.loggamma(self.a0)
      val += 0.5 * mp.log(np.linalg.det(self.lambda_prior * np.eye(self.hparams.context_dim + 1)))
      val -= 0.5 * mp.log(np.linalg.det(self.precision[action]))
      val += mp.fmul(self.a0, mp.log(self.b0))
      val -= mp.fmul(self.a[action], mp.log(self.b[action]))
      vval += mp.exp(val)

    vval /= float(self.hparams.num_actions)

    return vval
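The log-domain accumulation above is the point of using mpmath here: gamma(a) and b**a overflow a float64 long before the evidence itself is out of range. A minimal standalone sketch of the same trick (the parameter values are made up for illustration):

import mpmath as mp

mp.mp.dps = 50
a, b = 500.0, 1e-3            # hypothetical posterior parameters
# gamma(500) / b**500 overflows double precision, but its log is tame:
log_term = mp.loggamma(a) - a * mp.log(b)
term = mp.exp(log_term)       # evaluated once, at 50-digit precision
print(term)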
Example n. 2
def findPoissonChangePoint( data, factorial ):
	# data is a list of counts in each time period, uniformly spaced

	# the denominator (including both P(D|H1) and constant parts of P(D|H2) )
	C = data.sum()
	N = len(data)
	denominator = factorial[C-1] * pi / ( 2 * mpf(N)**C )

	# the numerator (trickier)
	# this needs to be averaged over the possible change points
	weights = zeros(N, dtype=object)
	CA = 0
	CB = C
	for i in range(1, N) :
		# points up through i are in data set A; the rest are in B
		datapoint = data[i-1]
		NA = mpf(i)   ; CA += datapoint
		NB = mpf(N-i) ; CB -= datapoint

		fraction_num = factorial[CA] * factorial[CB]
		fraction_den = NA**(CA+1) * NB**(CB+1) * ( (CA/NA)**2 + (CB/NB)**2 )
		weights[i-1] = mpf(fraction_num)/fraction_den

	numerator = weights.mean()
	lognum = inv_log10 * log( numerator )
	logden = inv_log10 * log( denominator )
	logodds = lognum - logden
	print( "num:", numerator, "log num:", lognum, "| denom:", denominator, "log denom:", logden, "|| log odds:", logodds )

	# If there is a change point, then logodds will be greater than 0
	if logodds < 0 : return None
	return ( weights.argmax(), logodds )
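A usage sketch, assuming the source module's star imports (numpy's zeros and array, mpmath's mpf, pi and log, and inv_log10 = 1/log(10)); the factorial table must be precomputed at least up to the total count:

import numpy as np
import mpmath

counts = np.array([2, 3, 1, 2, 9, 11, 10, 12])   # synthetic jump in rate at index 4
fact = [mpmath.factorial(k) for k in range(int(counts.sum()) + 1)]
cp = findPoissonChangePoint(counts, fact)
if cp is not None:
    index, logodds = cp
    print("change point near index", index, "log odds", logodds)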
Example n. 3
def polyfit_erfc(nroots, x, low):
    t = x
    if t > 19682.99:
        t = 19682.99

    if t > 1.0:
        tt = mpmath.log(t) / mpmath.log(3) + 1.0  # log3(t) + 1
    else:
        tt = mpmath.sqrt(t)

    it = int(tt)
    tt = tt - it
    tt = 2.0 * tt - 1.0  # map [0, 1] to [-1, 1]
    u = low * 2 - 1  # map [0, 1] to [-1, 1]

    tab_rs, tab_ws = tabulate_erfc(nroots, it)
    im = clenshaw_d1(tab_rs.astype(float), u, nroots)
    rr = clenshaw_d1(im, tt, nroots)
    rr = [r / (1 - r) for r in rr]

    im = clenshaw_d1(tab_ws.astype(float), u, nroots)
    ww = clenshaw_d1(im, tt, nroots)
    if x * low**2 < DECIMALS * .7:
        factor = mpmath.exp(-x * low**2)
        ww = [w * factor for w in ww]
    return rr, ww
Example n. 4
def cplot_in_terminal(expr, *args, prec=None, logname=None, color=lambda i:
    -mpmath.floor(mpmath.log(abs(i), 10))/(30 -
        mpmath.floor(mpmath.log(abs(i), 10))), points=1000000, **kwargs):
    """
    Run mpmath.cplot() but show in terminal if possible
    """
    kwargs['color'] = color
    kwargs['points'] = points
    from mpmath import cplot
    if prec:
        mpmath.mp.dps = prec
    f = lambdify(t, expr, mpmath)
    try:
        from iterm2_tools.images import display_image_bytes
    except ImportError:
        if logname:
            os.makedirs('plots', exist_ok=True)
            file = 'plots/%s.png' % logname
        else:
            file = None
        cplot(f, *args, file=file, **kwargs)
    else:
        from io import BytesIO
        b = BytesIO()
        cplot(f, *args, **kwargs, file=b)
        if logname:
            os.makedirs('plots', exist_ok=True)
            with open('plots/%s.png' % logname, 'wb') as f:
                f.write(b.getvalue())
        print(display_image_bytes(b.getvalue()))
def pdf_bb_ratio(a1, a2, b1, b2, w):
    lnA = mpmath.log(mpmath.beta(a1, b1)) + mpmath.log(mpmath.beta(a2, b2))

    def pdf_calc(wi):
        if wi < 0:
            print('Ratio below Zero! Not reasonable!')
            exit(1)
        elif wi == 0:
            resulti = 0
        elif wi < 1:
            resulti = mpmath.exp(
                mpmath.log(mpmath.beta(a1 + a2, b2)) +
                (a1 - 1.0) * mpmath.log(wi) +
                log_hyper_2F1(a1 + a2, 1 - b1, a1 + a2 + b2, wi) - lnA)
        else:
            resulti = mpmath.exp(
                mpmath.log(mpmath.beta(a1 + a2, b1)) -
                (1.0 + a2) * mpmath.log(wi) +
                log_hyper_2F1(a1 + a2, 1 - b2, a1 + a2 + b1, (1 / wi)) - lnA)
        return resulti

    if isinstance(w, int) or isinstance(w, float) or isinstance(w, mpmath.mpf):
        result = pdf_calc(w)
    else:
        result = np.zeros(len(w))
        for i in range(len(w)):
            wi = w[i]
            result[i] = pdf_calc(wi)
    return result
Example n. 6
    def cMLE(self, ip, **kwargs):
        """
        Estimation of c
        """
        c = ip
        if len(kwargs.keys()) != 0:
            a = kwargs['a']
            b = kwargs['b']
        else:
            a = self.arule[self.j + 1]
            b = self.brule[self.j + 1]
        tVec = self.tVec
        tn = tVec[-1]
        exp_tn = exp(-b * pow(tn, c))
        n = np.size(tVec)
        sum_k = 0
        for i in range(n):
            tVeci = tVec[i]
            tVeci1 = tVec[i - 1]
            if i > 0:
                numer = (pow(tVeci, c) * self.expo(i, b, c) * log(tVeci) -
                         pow(tVeci1, c) * self.expo(i - 1, b, c) * log(tVeci1))
                denom = (self.expo(i - 1, b, c) - self.expo(i, b, c))
            else:
                numer = (pow(tVeci, c) * self.expo(i, b, c) * log(tVeci))
                denom = (1 - self.expo(i, b, c))
            sum_k += self.kVec[i] * b * (numer / denom)

        cprime = sum_k - a * b * pow(self.tn, c) * self.expo(-1, b, c) * log(
            self.tn)
        return cprime
Example n. 7
def findGaussianChangePoint( data ):
	
	# the denominator. This is the easy part.
	N = len( data )

	if N<6 : return None # can't find a cp in data this small

	# set up gamma function table
	#for i in range(N):
		

	s2 = mpf(data.var())
	gpart = gamma( mpf(N)/2.0 - 1 )
	denom = (pi**1.5) * mpf((N*s2))**( -N/2.0 + 0.5 ) * gpart

	# the numerator. A little trickier.
	# calc_twostate_weights() already deals with ts<3 and ts>N-2.
	weights=calc_twostate_weights( data )
	if weights is None: return None

	num = 2.0**2.5 * abs(data.mean()) * weights.mean()

	logodds = log( num ) - log( denom ) 	

	print "num:", num, "log num:", log(num), "| denom:", denom, "log denom:", log(denom), "|| log odds:", logodds 
	
	# If there is a change point, then logodds will be greater than 0
	if logodds < 0 : 
		return None
	
	return ( weights.argmax(), logodds ) 
Example n. 8
    def mobility(self, z=1000, E=0, T=300, pn=None):
        if pn is None:
            Eg = self.band_gap(T, symbolic=False, electron_volts=False)
            pn = self.Nc(T, symbolic=False) * self.Nv(T, symbolic=False) * mp.exp(
                self.__to_numeric(-Eg / (k * T))) * 1e-12
        N = 0
        for dopant in self.dopants:
            N += dopant.concentration(z)
        N *= 1e-6
        mobility = {'mobility_e': {'mu_L': 0, 'mu_I': 0, 'mu_ccs': 0, 'mu_tot': 0},
                    'mobility_h': {'mu_L': 0, 'mu_I': 0, 'mu_ccs': 0, 'mu_tot': 0}}
        for key in mobility.keys():
            mu_L = self.reference[key]['mu_L0'] * (T / 300.0) ** (-self.reference[key]['alpha'])
            mu_I = (self.reference[key]['A'] * (T ** (3 / 2)) / N) / (
                mp.log(1 + self.reference[key]['B'] * (T ** 2) / N)
                - self.reference[key]['B'] * (T ** 2) / (self.reference[key]['B'] * (T ** 2) + N))
            try:
                mu_ccs = (2e17 * (T ** (3 / 2)) / mp.sqrt(pn)) / (
                    mp.log(1 + 8.28e8 * (T ** 2) * (pn ** (-1 / 3))))
                X = mp.sqrt(6 * mu_L * (mu_I + mu_ccs) / (mu_I * mu_ccs))
            except:
                mu_ccs = np.nan
                X = 0
            mu_tot = mu_L * (1.025 / (1 + ((X / 1.68) ** 1.43)) - 0.025)
            Field_coeff = (1 + (mu_tot * E * 1e-2 / self.reference[key]['v_s'])
                           ** self.reference[key]['beta']) ** (-1 / self.reference[key]['beta'])
            mobility[key]['mu_L'] = mu_L * 1e-4
            mobility[key]['mu_I'] = mu_I * 1e-4
            mobility[key]['mu_ccs'] = mu_ccs * 1e-4
            mobility[key]['mu_tot'] = mu_tot * 1e-4 * Field_coeff
        return mobility
def generate_log_moments(N, max_lambda, noise_scale, lot_size):
    # print('generating log moments')
    L = lot_size
    q = 1.0 * L / N

    # these moments are a function of q, noise_scale and max_lambda

    # generate pdfs which are to be integrated numerically
    pdf1 = lambda x: pdf_gauss(x, noise_scale, mp.mpf(0))
    pdf2 = lambda x: (1 - q) * pdf_gauss(x, noise_scale, mp.mpf(0)) + \
                     q * pdf_gauss(x, noise_scale, mp.mpf(1))

    # placeholder for alpha_M(lambda) for each iteration
    alpha_M_lambda = np.zeros(max_lambda)

    for lambda_val in range(1, max_lambda + 1):
        # it isn't defined which dataset is D and which is D' - thus consider both and take the maximum
        I1_func, I2_func = get_I1_I2_lambda(lambda_val, pdf1, pdf2)
        I1_val = integral_inf_mp(I1_func)
        I2_val = integral_inf_mp(I2_func)

        if I1_val > I2_val:
            alpha_M_lambda[lambda_val - 1] = to_np_float_64(mp.log(I1_val))
        else:
            alpha_M_lambda[lambda_val - 1] = to_np_float_64(mp.log(I2_val))

    return alpha_M_lambda
Example n. 10
def BrunTerms(m, n, d, a, x0):
    # w, vf1() and X0() are assumed to be defined at module level
    result = ((d / m) / (2 * (exp(d / m) - 1)) * X0(m, d, a, x0)**(-2)
              + w / (exp(d / m) - 1) * X0(m, d, a, x0)**(-1 / 2)
              + 2 * vf1(m, n, a) * (1 + d * a)
                * (d / m + log(X0(m, d, a, x0) * (1 + d)))
                / log((exp(d / m) - 1) * X0(m, d, a, x0)))
    return result
Example n. 11
    def get_apparent_activation_energy(self,rxn_parameters,epsilon=1e-10):
        """
        returns apparent Arrhenius activation energies (in units of R)
        for production/consumption of each gas phase species.
        Calculated as
        E_app = T^2(dlnr_+/dT)=(T^2/r_+)(dr_+/dT), where r+ is the TOF
        :param rxn_parameters: reaction parameters, see solver-base
        :param epsilon: degree of perturbation in temperature
        :type epsilon: float, optional
        """
        current_tofs = self.get_turnover_frequency(rxn_parameters)
        current_T = self.temperature
        new_T = current_T*(1+epsilon)
        dT = new_T-current_T
        self.temperature = new_T
        descriptors = list(self._rxm.mapper._descriptors) #don't overwrite them, if temperature is a descriptor
        if 'temperature' in self._rxm.descriptor_names:
                index = self._rxm.descriptor_names.index('temperature')
                descriptors[index] = new_T
        rxn_parameters_newT = self._rxm.scaler.get_rxn_parameters(descriptors)
        new_tofs = self.get_turnover_frequency(rxn_parameters_newT)
        E_apps = []
        R = 8.31447e-3/96.485307#units of eV

        for i,gas in enumerate(self.gas_names):
            barriers_i = []
            dlnTOF = mp.log(new_tofs[i])-mp.log(current_tofs[i]) #this will fail if any of the TOFs are 0.
            E_app = R*float(dlnTOF.real)/dT*(current_T**2)
            E_apps.append(E_app)

        self.temperature = current_T
        self._apparent_activation_energy = E_apps
        #self.get_turnover_frequency(rxn_parameters)
        print(E_apps)
        return E_apps
Example n. 12
def gaussian_logx_given_r(r, sigma, n_dim):
    """
    Returns logx coordinate corresponding to r values for a Gaussian prior with
    the specified standard deviation and dimension

    Uses the mpmath package for arbitrary precision.

    Parameters
    ----------
    r: float or numpy array
        Radial coordinates at which to evaluate logx.
    sigma: float
        Standard deviation of Gaussian.
    n_dim: int
        Number of dimensions.

    Returns
    -------
    logx: float or numpy array
        Logx coordinates corresponding to input radial coordinates.
    """
    exponent = 0.5 * (r / sigma) ** 2
    if isinstance(r, np.ndarray):  # needed to ensure output is numpy array
        logx = np.zeros(r.shape)
        for i, expo in enumerate(exponent):
            logx[i] = float(mpmath.log(mpmath.gammainc(n_dim / 2., a=0, b=expo,
                                                       regularized=True)))
        return logx
    else:
        return float(mpmath.log(mpmath.gammainc(n_dim / 2., a=0, b=exponent,
                                                regularized=True)))
Example n. 13
def jamieson_hb():
    max_iter = 1121  # maximum iterations/epochs per configuration
    eta = 3  # defines downsampling rate (default=3)
    logeta = lambda x: mpmath.log(x) / mpmath.log(eta)
    s_max = int(
        logeta(max_iter)
    )  # number of unique executions of Successive Halving (minus one)
    B = (
        s_max + 1
    ) * max_iter  # total number of iterations (without reuse) per execution of Successive Halving (n,r)

    #### Begin Finite Horizon Hyperband outer loop. Repeat indefinitely.
    for s in reversed(range(s_max + 1)):
        n = int(ceil(int(B / max_iter / (s + 1)) *
                     eta**s))  # initial number of configurations
        r = max_iter * eta**(
            -s)  # initial number of iterations to run configurations for

        #### Begin Finite Horizon Successive Halving with (n,r)
        T = [get_random_hyperparameter_configuration() for i in range(n)]
        print(
            f"{'=' * 90}\n>> Generated {n} arms and evaluated with TPE for {r} resources\n"
        )
        for i in range(s + 1):
            # Run each of the n_i configs for r_i iterations and keep best n_i/eta
            n_i = n * eta**(-i)
            r_i = r * eta**(i)
            val_losses = [
                run_then_return_val_loss(num_iters=r_i, hyperparameters=t)
                for t in T
            ]
            print(
                f"** Evaluated {len(val_losses)} arms (n_i is {n_i}), each with {r_i:.2f} resources"
            )
            T = [T[i] for i in np.argsort(val_losses)[0:int(n_i / eta)]]
Example n. 14
def getRobbinsConstant( ):
    robbins = fsub( fsub( fadd( 4, fmul( 17, sqrt( 2 ) ) ), fmul( 6, sqrt( 3 ) ) ), fmul( 7, pi ) )
    robbins = fdiv( robbins, 105 )
    robbins = fadd( robbins, fdiv( log( fadd( 1, sqrt( 2 ) ) ), 5 ) )
    robbins = fadd( robbins, fdiv( fmul( 2, log( fadd( 2, sqrt( 3 ) ) ) ), 5 ) )

    return robbins
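A quick check, assuming the calculator's own mpmath imports (mp, fadd, fsub, fmul, fdiv, sqrt, pi, log) are in scope; Robbins' constant is the mean distance between two random points in the unit cube:

mp.dps = 30
print(getRobbinsConstant())   # ≈ 0.6617071822671762...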
Example n. 15
def pdf(x, df, nc):
    """
    Probability density function of the noncentral t distribution.

    The infinite series is estimated with `mpmath.nsum`.
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        df = mpmath.mpf(df)
        nc = mpmath.mpf(nc)

        if x == 0:
            logp = (-nc**2 / 2 - mpmath.log(mpmath.pi) / 2 -
                    mpmath.log(df) / 2 + mpmath.loggamma(
                        (df + 1) / 2) - mpmath.loggamma(df / 2))
            p = mpmath.exp(logp)
        else:
            logc = (df * mpmath.log(df) / 2 - nc**2 / 2 -
                    mpmath.loggamma(df / 2) - mpmath.log(mpmath.pi) / 2 -
                    (df + 1) / 2 * mpmath.log(df + x**2))
            c = mpmath.exp(logc)

            def _pdf_term(i):
                logterm = (mpmath.loggamma(
                    (df + i + 1) / 2) + i * mpmath.log(x * nc) +
                           i * mpmath.log(2 / (df + x**2)) / 2 -
                           mpmath.loggamma(i + 1))
                return mpmath.exp(logterm).real

            s = mpmath.nsum(_pdf_term, [0, mpmath.inf])
            p = c * s
        return p
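A usage sketch exercising both branches (mpmath only; nsum sums the series to convergence):

import mpmath

mpmath.mp.dps = 25
print(pdf(0, df=10, nc=2))     # closed-form branch at x = 0
print(pdf(1.5, df=10, nc=2))   # infinite-series branch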
Example n. 16
def polyfit_erfc(nroots, x, low):
    t = x
    if t > 19682.99:
        t = 19682.99

    if t > 1.0:
        tt = mpmath.log(t) / mpmath.log(3) + 1.0  # log3(t) + 1
    else:
        tt = mpmath.sqrt(t)

    it = int(tt)
    tt = tt - it
    tt = 2.0 * tt - 1.0  # map [0, 1] to [-1, 1]
    u = low * 2 - 1  # map [0, 1] to [-1, 1]

    tab_rs, tab_ws = tabulate_erfc(nroots, it)
    im = clenshaw_d1(tab_rs.astype(float), u, nroots)
    imc = intermediate(im)
    rr = clenshaw_d1(imc, tt, nroots)
    rr = [r / (1 - r) for r in rr]

    im = clenshaw_d1(tab_ws.astype(float), u, nroots)
    imc = intermediate(im)
    ww = clenshaw_d1(imc, tt, nroots)
    return rr, ww
def custom_logsumexp_mpmath(logs, signs):
    positive_mask = signs > 0
    positive_logs = np.array(logs)[positive_mask]
    negative_logs = np.array(logs)[~positive_mask]

    res_pos = mpmath.mpf(0.0)
    res_neg = None

    if (len(positive_logs) > 0):
        res_pos = max(positive_logs) + mpmath.log(
            sum([mpmath.exp(i - max(positive_logs)) for i in positive_logs]))

    if (len(negative_logs) > 0):
        res_neg = max(negative_logs) + mpmath.log(
            sum([mpmath.exp(i - max(negative_logs)) for i in negative_logs]))

    if (res_neg is None):
        return res_pos, 1.0

    if (res_pos == res_neg):
        print("not enough precision!!!...")
        exit(-1)
        return None, None
    elif (res_pos == res_neg and res_pos == 0):
        print("0?!")
        print(logs)
        exit(-1)
    if (res_neg < res_pos):
        return res_neg + mpmath.log(mpmath.exp(res_pos - res_neg) - 1), 1.0
    else:
        return res_pos + mpmath.log(mpmath.exp(res_neg - res_pos) - 1), -1.0
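For example, representing exp(2) - exp(1) without ever forming the exponentials:

import numpy as np
import mpmath

logs = np.array([2.0, 1.0])
signs = np.array([1.0, -1.0])
val, sign = custom_logsumexp_mpmath(logs, signs)
print(val, sign)   # val = log(exp(2) - exp(1)) ≈ 1.5413, sign = 1.0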
Example n. 18
def findGaussianChangePoint(data):

    # the denominator. This is the easy part.
    N = len(data)

    if N < 6: return None  # can't find a cp in data this small

    # set up gamma function table
    #for i in range(N):

    s2 = mpf(data.var())
    gpart = gamma(mpf(N) / 2.0 - 1)
    denom = (pi**1.5) * mpf((N * s2))**(-N / 2.0 + 0.5) * gpart

    # the numerator. A little trickier.
    # calc_twostate_weights() already deals with ts<3 and ts>N-2.
    weights = calc_twostate_weights(data)
    if weights is None: return None

    num = 2.0**2.5 * abs(data.mean()) * weights.mean()

    logodds = log(num) - log(denom)

    print "num:", num, "log num:", log(
        num), "| denom:", denom, "log denom:", log(
            denom), "|| log odds:", logodds

    # If there is a change point, then logodds will be greater than 0
    if logodds < 0:
        return None

    return (weights.argmax(), logodds)
Example n. 19
def mp_fast_logsumexp(X, coeffs=None):
    """fast_logsumexp for high precision numbers using mpmath.
    
    Parameters
    ----------
    X : ndarray
        Terms inside logs.
    coeffs : ndarray
        Factors in front of exponentials. 

    Returns
    -------
    float
        Value of magnitude of quantity inside log (the sum of exponentials).
    float
        Sign.
    """

    Xmx = max(X)
    if coeffs is None:
        y = sum(map(mp.exp, X - Xmx))
    else:
        y = np.array(coeffs).dot(list(map(mp.exp, X - Xmx)))

    if y < 0:
        return mp.log(abs(y)) + Xmx, -1.
    return mp.log(y) + Xmx, 1.
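For instance, a sum whose terms individually overflow float64:

import numpy as np
from mpmath import mp

logmag, sign = mp_fast_logsumexp(np.array([1000.0, 1000.0]))
print(logmag, sign)   # 1000 + log(2) ≈ 1000.693, sign 1.0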
Example n. 20
    def log_likelihood(self,
                       data,
                       shape,
                       loc,
                       scale,
                       dx='1e-10',
                       precision=100):
        """
        Calculates log-likelihood.

        Parameters
        ----------
        data : float or array_like
        shape : float
        loc : float
        scale : float
        dx : str, optional
        precision : int, optional
        """

        with mpmath.workdps(precision):
            # Make sure passed values are valid
            data = self.check_support(data, shape, loc, scale, dx, precision)

            if np.isscalar(data):
                return mpmath.log(
                    self.pdf(data, shape, loc, scale, dx, precision))
            else:
                return mpmath.fsum([
                    mpmath.log(self.pdf(_x, shape, loc, scale, dx, precision))
                    for _x in data
                ])
Example n. 21
    def set_precision_parameters(self):
        r = numpy.min(self.shape)
        m, n = self.shape
        zeta = float(
            (2*numpy.sqrt(-m*n*log(self.delta))) -
            (2*log(self.delta)) + (m*n)
        )
        printd('r: {r}\nm: {m}\nn: {n}\nzeta: {z}',
               level=6, r=r, m=m, n=n, z=zeta)

        self.precision_parameters = dict()
        ''' Alpha as defined in Theorem 2, p. 8, equation 1
        '''
        self.precision_parameters['alpha'] = (
            (generalised_harmonic_sum(r)+generalised_harmonic_sum(r, 0.5)
             )*pow(self.query_sup, 2) +
            2*generalised_harmonic_sum(r)*self.query_sup*self.sensitivity
        )
        ''' Beta as defined in Theorem 2, p. 8, equation 1
        '''
        self.precision_parameters['beta'] = (
            2*pow(n*m, 0.25)*zeta*generalised_harmonic_sum(r)*self.sensitivity
        )
        ''' Omega as defined in Theorem 3, p. 12, equation 8
        '''
        self.precision_parameters['omega'] = (
            4*generalised_harmonic_sum(r)*self.query_sup*self.sensitivity
        )
def dpint(f,snr):
	'''Integrand of the detection probability of single sources. Since it contains a modified Bessel function, which takes very large values, it is evaluated in log space.'''
	big=mpmath.log(mpmath.besseli(1,snr*np.sqrt(2.*f)))
	small=mpmath.mpf(-f-0.5*snr**2.)
	normal=mpmath.log(np.sqrt(2.*f)*1./snr)
	result=mpmath.exp(mpmath.fsum([big,small,normal]))
	return float(result) #In the end the result should be between 0 and some sizeable number, so a float should be enough.
Example n. 23
    def _w_tilde(self, u_bar):
        """Compute w_tilde, the threshold for the word-length w such that
		MSB = computeNaiveMSB    if w >= w_tilde
		MSB = computeNaiveMSB+1  if w < w_tilde
		(this doesn't take into account the roundoff error, as in FxPF)
		See ARITH26 paper
		Parameters:
			- u_bar: vector of bounds on the inputs of the system
		Returns: a vector of thresholds w_tilde

		We use:  w_tilde = 1 + ceil(log2(zeta_bar)) - floor(log2( 2^ceil(log2(zeta_bar)) - zeta_bar ))
		with zeta_bar = <<Hzeta>>.u_bar
		"""
        #TODO: test if zeta_bar is a power of 2 (should be +Inf in that case)
        zeta_bar = self.Hzeta.WCPG() * u_bar

        with mpmath.workprec(500):  # TODO: compute how many bit we need !!
            wtilde = [
                int(1 + mpmath.ceil(mpmath.log(x[0], 2)) - mpmath.floor(
                    mpmath.log(
                        mpmath.power(2, mpmath.ceil(mpmath.log(x[0], 2))) -
                        x[0], 2))) for x in zeta_bar.tolist()
            ]

        return wtilde
Example n. 24
def getSuperRootsOperator(n, k):
    '''Returns all the super-roots of n, not just the nice, positive, real one.'''
    k = fsub(k, 1)
    factors = [fmul(i, root(k, k)) for i in unitroots(int(k))]
    base = root(fdiv(log(n), lambertw(fmul(k, log(n)))), k)

    return [fmul(i, base) for i in factors]
Example n. 25
def genenergies(fnR,fnQ,seqsR,seqsQ,gamma,sQ,sR,R0): # Parses seqs and model type, then calculates and returns energies. R is the transcription factor, Q is RNAP
    ematR = np.genfromtxt(fnR,skiprows=1)
    ematQ = np.genfromtxt(fnQ,skiprows=1)
    fR = open(fnR)
    fQ = open(fnQ)
    mattype = fR.read()[:6] #mattype must be the same
    #mattypeQ = fQ.read()[:6]
    energies = np.zeros(len(seqsQ))
    N = len(seqsQ)
    mut_region_lengthQ = len(seqsQ[0])
    mut_region_lengthR = len(seqsR[0])
    
    if mattype == '1Point':
        for i,s in enumerate(seqsR):
            seq_matR = seq2mat(s)
            seq_matQ = seq2mat(seqsQ[i])
            RNAP = (seq_matQ*ematQ).sum()*sQ
            TF = (seq_matR*ematR).sum()*sR + R0
            energies[i] = -RNAP + mp.log(1 + mp.exp(-TF - gamma)) - mp.log(1 + mp.exp(-TF))
    '''
    elif mattype == '2Point':
            for i,s in enumerate(seqs):
                seq_mat = np.zeros(round(sp.misc.comb(mut_region_length,2))*16)
                seq_mat[seq2mat2(s)] = 1
                energies[i] = (seq_mat*(emat.ravel())).sum()
    elif mattype == '3Point':
            for i,s in enumerate(seqs):
                seq_mat = np.zeros(round(sp.misc.comb(mut_region_length,3))*64)
                seq_mat[seq2mat3(s)] = 1
                energies[i] = (seq_mat*(emat.ravel())).sum()
    '''
    return energies
Example n. 27
def lnProb_rhoA2rhoB2_given_rhoA2orhoB2o( rhoA2, rhoB2, rhoA2o, rhoB2o ):
    '''
    return ln( p(rhoA2,rhoB2|rhoA2o,rhoB2o) )

    NOTE: essentially 2 independent chi2 distributions
    '''
    types = (int,float)
    if isinstance(rhoA2, types) and isinstance(rhoB2, types) \
      and isinstance(rhoA2o, types) and isinstance(rhoB2o, types):
        return np.log(0.25) - 0.5*(rhoA2 + rhoB2 + rhoA2o + rhoB2o) \
            + float(mpmath.log(mpmath.besseli(0, (rhoA2*rhoA2o)**0.5))) \
            + float(mpmath.log(mpmath.besseli(0, (rhoB2*rhoB2o)**0.5)))
    else:
        N = len(rhoA2)
        assert N==len(rhoB2), 'rhoA2 and rhoB2 do not have the same length'
        assert N==len(rhoA2o), 'rhoA2 and rhoA2o do not have the same length'
        assert N==len(rhoB2o), 'rhoA2 and rhoB2o do not have the same length'

        log25 = np.log(0.25)
        ans = [log25 - 0.5*(rhoa2 + rhob2 + rhoa2o + rhob2o) \
            + float(mpmath.log(mpmath.besseli(0, (rhoa2*rhoa2o)**0.5))) \
            + float(mpmath.log(mpmath.besseli(0, (rhob2*rhob2o)**0.5))) \
            for rhoa2, rhob2, rhoa2o, rhob2o in zip(rhoA2, rhoB2, rhoA2o, rhoB2o)
        ]
        if isinstance(rhoA2, np.ndarray) or isinstance(rhoB2, np.ndarray) or isinstance(rhoA2o, np.ndarray) or isinstance(rhoB2o, np.ndarray):
            ans = np.array(ans)

        ans[ans!=ans] = -np.infty ### get rid of nans
                              ### FIXME we may want to be smarter about this and 
                              ### prevent nans from showing up in the first place
        return ans
Example n. 29
    def _compute_eps(lam):
        session = WolframLanguageSession(wlpath)
        session.evaluate(
            wlexpr('''
           randomgamma[alpha_, beta_, gamma_, samples_] := RandomVariate[GammaDistribution[alpha, beta, gamma, 0], samples];
         '''))
        random_gamma = session.function(wlexpr('randomgamma'))

        session.evaluate(
            wlexpr('''
           integrant[exponents_, beta_, dimension_, clippingbound_, lam_, r_, q_] := Mean[NIntegrate[
                           (Sin[x]^(dimension-2)*Gamma[dimension/2]/(Sqrt[Pi]*Gamma[(dimension-1)/2]))*(((1-q)*(1-q+
                           q*Exp[(r^exponents-(r^2+clippingbound^2-2*r*clippingbound*Cos[x])^(exponents/2))/beta])^(lam))
                        +(q*(1-q+q*Exp[((r^2+clippingbound^2+2*r*clippingbound*Cos[x])^(exponents/2)-r^exponents)/beta])^(lam))),{x,0,Pi}
                           ]];
         '''))
        integrant_moment = session.function(wlexpr('integrant'))
        samples = random_gamma(FLAGS.dimension / FLAGS.exponents,
                               beta**(1 / FLAGS.exponents), FLAGS.exponents,
                               FLAGS.num_samples)
        moment = integrant_moment(FLAGS.exponents, beta, FLAGS.dimension,
                                  FLAGS.clippingbound, lam, samples, FLAGS.q)
        eps = (FLAGS.T * mp.log(moment) + mp.log(1 / FLAGS.delta)) / lam
        session.terminate()
        return eps
Example n. 30
def integral(pos, shift, poles):
    """
		Returns the inner product of two monic monomials with respect to the
		positive measure prefactor that turns a `PolynomialVector` into a rational
		approximation to a conformal block.
	"""
    single_poles = []
    double_poles = []
    ret = mpmath.mpf(0)

    for p in poles:
        p = mpmath.mpf(str(p))

        if (p - shift) in single_poles:
            single_poles.remove(p - shift)
            double_poles.append(p - shift)
        elif (p - shift) < 0:
            single_poles.append(p - shift)

    for i in range(0, len(single_poles)):
        denom = mpmath.mpf(1)
        pole = single_poles[i]
        other_single_poles = single_poles[:i] + single_poles[i + 1:]
        for p in other_single_poles:
            denom *= pole - p
        for p in double_poles:
            denom *= (pole - p)**2
        ret += (mpmath.mpf(1) / denom) * (rho_cross**pole) * (
            (-pole)**pos) * mpmath.factorial(pos) * mpmath.gammainc(
                -pos, a=pole * mpmath.log(rho_cross))

    for i in range(0, len(double_poles)):
        denom = mpmath.mpf(1)
        pole = double_poles[i]
        other_double_poles = double_poles[:i] + double_poles[i + 1:]
        for p in other_double_poles:
            denom *= (pole - p)**2
        for p in single_poles:
            denom *= pole - p
        # Contribution of the most divergent part
        ret += (mpmath.mpf(1) /
                (pole * denom)) * ((-1)**(pos + 1)) * mpmath.factorial(pos) * (
                    (mpmath.log(rho_cross))**(-pos))
        ret -= (mpmath.mpf(1) / denom) * (rho_cross**pole) * (
            (-pole)**(pos - 1)) * mpmath.factorial(pos) * mpmath.gammainc(
                -pos, a=pole *
                mpmath.log(rho_cross)) * (pos + pole * mpmath.log(rho_cross))

        factor = 0
        for p in other_double_poles:
            factor -= mpmath.mpf(2) / (pole - p)
        for p in single_poles:
            factor -= mpmath.mpf(1) / (pole - p)
        # Contribution of the least divergent part
        ret += (factor / denom) * (rho_cross**pole) * (
            (-pole)**pos) * mpmath.factorial(pos) * mpmath.gammainc(
                -pos, a=pole * mpmath.log(rho_cross))

    return (rho_cross**shift) * ret
def eta(lam):
    """Function from DLMF 8.12.1 shifted to be centered at 0."""
    if lam > 0:
        return mp.sqrt(2*(lam - mp.log(lam + 1)))
    elif lam < 0:
        return -mp.sqrt(2*(lam - mp.log(lam + 1)))
    else:
        return 0
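A quick check against the small-|lam| expansion eta(lam) ≈ lam - lam**2/3, assuming the source's usual from mpmath import mp:

from mpmath import mp

print(eta(mp.mpf('0.1')))    # ≈ 0.09685 (expansion: 0.09667)
print(eta(mp.mpf('-0.1')))   # ≈ -0.10354 (odd branch)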
Example n. 32
def S4(m, s):
    dZD = lambda t: ((5 - 2*s) * A * log(t)**(4 - 2*s) * t**(8/3*(1 - s) - 1)
                     + A * log(t)**(5 - 2*s) * (8/3*(1 - s)) * t**(8/3*(1 - s) - 1)
                     + 2 * B * log(t) / t) / t**(m + 1)
    #return 2*ZD(s, H)/H**(m + 1) #simplified expression for optimising with higher Riemann height
    return ZD(s, H) / H**(m + 1) + sum(list(quad(dZD, H, inf)))
Example n. 33
def wme_gen(f, pYs_p, pZs_p, X, alpha, g_p, delta=0):
    # convert probabilities to mpmath.mpf
    pYs = [{k: mpmath.mpmathify(pY[k]) for k in pY} for pY in pYs_p]
    pZs = [{k: mpmath.mpmathify(pZ[k]) for k in pZ} for pZ in pZs_p]
    g = {w: {y: mpmath.mpmathify(g_p[w][y]) for y in g_p[w]} for w in g_p}
    delta = mpmath.mpmathify(delta)

    # precondition
    # bypass precondition if number of arguments == 0, i.e. if f is defined as a function of another function
    if len(X) + len(pYs) + len(pZs) != len(inspect.getfullargspec(f)[0]) and len(
            inspect.getfullargspec(f)[0]) != 0:
        sys.exit(
            "Aborted: Number of function arguments and inputs didn't match")

    if len(inspect.getfullargspec(f)[0]) == 0:
        try:
            a = f(*((0, ) * (len(X) + len(pYs) + len(pZs))))
        except KeyError:
            # here we tolerate KeyError because merged functions might not recognise input (0, 0, ...)
            pass
        except:
            sys.exit(
                "Aborted: Number of function arguments and inputs didn't match"
            )

    # joint distribution for Output and Y
    pOaY = dict()
    for Y in itertools.product(*pYs):
        for Z in itertools.product(*pZs):
            o = f(*(X + Y + Z))
            p_o = prod((pYZ[yz] for (yz, pYZ) in zip(Y + Z, pYs + pZs)))
            # critical for distributions with a zero probability!
            # next line ensures that only possible events appear in pO (the if)
            if p_o > 0:
                if o in pOaY:
                    if Y in pOaY[o]:
                        pOaY[o][Y] += p_o
                    else:
                        pOaY[o][Y] = p_o
                else:
                    pOaY[o] = {Y: p_o}

    # W[o] is the vector inside the norm (over guesses w)
    W = dict()
    for o in pOaY:
        W[o] = [
            sum(pOaY[o][Y] * g[w][Y] for Y in g[w] if Y in pOaY[o]) + delta
            for w in g
        ]

    if alpha == -1:
        sum_o = sum(max(W[o]) for o in pOaY)
        result = -mpmath.log(sum_o, b=2)
    else:
        sum_o = sum(p_norm(W[o], alpha) for o in pOaY)
        result = (alpha / (1 - alpha)) * mpmath.log(sum_o, b=2)

    return result
def redistill(distillate_filename, out_filename, expected_dps=60,
              newton_steps=7,
              min_model_digits=None):
  """Second-distills a distillate."""
  if mpmath.mp.dps < expected_dps:
    raise RuntimeError(
        'Precision setting for mpmath is below the expected dps '
        'for this calculation: %s < %s' % (mpmath.mp.dps, expected_dps))
  # Tries to find out if there are further opportunities that reduce the number
  # of parameters which have been missed in 1st distillation, and if so,
  # performs the corresponding reduction.
  # In any case, adds basic information about physics.
  distillate_model = read_distillate_model(distillate_filename)
  v70 = v70_from_model(distillate_model)
  if expected_dps > 15:
    sinfo0 = scalar_sector_mpmath.mpmath_scalar_manifold_evaluator(v70)
  else:
    sinfo0 = scalar_sector.numpy_scalar_manifold_evaluator(v70)
  threshold_deviation = (max(3 * sinfo0.stationarity, 1e-7)
                         if expected_dps >= 40 else 1e-3)
  # First-distillation produced a high-accuracy form of the numerical
  # solution, so we should use a stricter check here.
  def still_good(v70_trial):
    sinfo = scalar_sector.numpy_scalar_manifold_evaluator(v70_trial)
    return (abs(sinfo0.potential - sinfo.potential) < threshold_deviation and
            sinfo.stationarity < threshold_deviation)
  # This is strongly expected to work, given that we already had highly
  # accurate data.
  low_dim_model_take2 = next(iter(find_simple_low_dimensional_model(
      [v70],
      min_digits=max(3, min_model_digits or int(
          -mpmath.log(sinfo0.stationarity, 10)) // 2 - 2),
      still_good=still_good)))
  if len(low_dim_model_take2.params) < len(distillate_model.params):
    print('Note: Could further reduce the number of model parameters '
          '({} -> {}).'.format(distillate_model.params,
                               low_dim_model_take2.params))
  model_take2, mdnewton_ok = distill_model(
      low_dim_model_take2,
      newton_steps=newton_steps,
      still_good=still_good,
      target_digits_position=expected_dps)
  v70_accurate = v70_from_model(model_take2)
  sinfo_accurate = scalar_sector_mpmath.mpmath_scalar_manifold_evaluator(
      v70_accurate)
  stationarity = sinfo_accurate.stationarity
  log10_stationarity = math.floor(mpmath.log(stationarity, 10))
  approx_stationarity_str = (
      '%.3fe%d' %
      (float(stationarity *
             mpmath.mpf(10)**(-log10_stationarity)),
       log10_stationarity))
  with open(out_filename, 'w') as h:
    write_model(h, model_take2,
                dict(mdnewton_ok=mdnewton_ok,
                     potential=str(sinfo_accurate.potential),
                     stationarity=approx_stationarity_str))
Example n. 35
def mpmsb(value, signed):
    if isinstance(value, Interval):
        return np.max([mpmsb(value.lower_bound, signed=signed),
                       mpmsb(value.upper_bound, signed=signed)])
    if value > 0:
        return int(mpmath.floor(mpmath.log(value, 2))) + int(signed)
    if value < 0:
        return int(mpmath.ceil(mpmath.log(-value, 2)))
    return -np.inf
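Examples (plain ints take the scalar branches; the first branch assumes the caller's own Interval type):

print(mpmsb(5, signed=False))   # floor(log2(5)) = 2
print(mpmsb(5, signed=True))    # 3: one extra bit for the sign
print(mpmsb(-4, signed=True))   # ceil(log2(4)) = 2
print(mpmsb(0, signed=False))   # -inf: zero has no MSB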
def compute_similarity(partitions_themes, similarity='KLDivergence'):
    """
    Third step of the Temporal Text Mining algorithm. Receives the distribution of all themes in two partitions.
    Computes and returns the similarity between all the themes.

    :param partitions_themes: Contains the distribution of all themes in the partitions.
    :type list
    :param similarity: Specifies which similarity measure to use
    :type str

    :return: Returns the computed similarities between all the themes.
    :rtype: list
    """

    # Declare the similarity matrix to store the computed similarities
    similarity_matrix = []

    # Compute the similarities between all the themes
    for theme1_index, theme1 in enumerate(partitions_themes[0]):

        # Creates a new line in the matrix
        similarity_matrix.append([])

        for theme2_index, theme2 in enumerate(partitions_themes[1]):

            # If the similarity is KL-Divergence
            if similarity == 'KLDivergence':
                vocabulary_size = len(theme1)
                klDivergence = 0
                for word_index in range(vocabulary_size):
                    klDivergence += theme2[word_index]*math.log(theme2[word_index]/theme1[word_index])
                similarity_matrix[theme1_index].append(1/klDivergence)

            # If the similarity is Jensen-Shanon Divergence (JSD)
            if similarity == 'JSDivergence':
                vocabulary_size = len(theme1)
                jsDivergence = 0
                for word_index in range(vocabulary_size):
                    jsDivergence += (0.5*theme1[word_index]*math.log(theme1[word_index]/theme2[word_index]) +
                                     0.5*theme2[word_index]*math.log(theme2[word_index]/theme1[word_index]))
                similarity_matrix[theme1_index].append(1/jsDivergence)

            # If the similarity is the support of the distributions
            if similarity == 'support':
                # Compute the support of each theme distribution
                threshold = 0.001
                theme1_support = (theme1 > threshold)
                theme2_support = (theme2 > threshold)

                # Compute and store the similarity between the distributions' support
                support_similarity = sum((theme1_support & theme2_support))
                similarity_matrix[theme1_index].append(support_similarity)

    # Returns the computed similarities between all the themes.
    return similarity_matrix
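A minimal check of the KLDivergence branch with two toy theme distributions:

import math

theme1 = [0.5, 0.3, 0.2]
theme2 = [0.4, 0.4, 0.2]
kl = sum(q * math.log(q / p) for p, q in zip(theme1, theme2))
print(1 / kl)   # the similarity score the matrix would store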
Example n. 37
        def pqCandidates(z, w = w):

            PiI = mpmath.pi * 1j

            if abs(1 - z) < globalsettings.getSetting("maximalError"):
                return (z, 0, 0), 1

            p = mpmathRoundToInt( (w.w0 - mpmath.log(  z)) / PiI)
            q = mpmathRoundToInt( (w.w1 + mpmath.log(1-z)) / PiI)
            err = abs(w.w2 + mpmath.log(z) - mpmath.log(1-z) + p * PiI + q * PiI)

            return (z, p, q), err
Example n. 38
def logOfSquare(c):
    # NOTE: this early return short-circuits the branch-cut check below
    return mpmath.log(c)

    csquare = c * c

    maxErr = globalsettings.getSetting("maximalError")

    if not ((csquare.real > 0 or
             abs(csquare.imag) > maxErr)):
        raise NumericalError(c, msg = "logOfSqaure near branch cut")

    return mpmath.log(csquare) / 2
Example n. 39
def my_secant(eq, p1, p2, debug=False):
    tol = mp.mpf(0.1)
    max_count = 10000
    sol = 0
    for count in range(max_count):
        if debug: print(count + 1, '')
        if p1 == p2:
            sol = p1
            break
        y1 = eq(p1)
        y2 = eq(p2)
        if debug: print('-->', p1, '->', y1)
        if debug: print('-->', p2, '->', y2)
        if abs(y1) < abs(y2):
            sol = p1
            err = abs(y1)
        else:
            sol = p2
            err = abs(y2)
        if err < tol:
            break
        if mp.sign(y1) * mp.sign(y2) < 0:
            # p3 = (p1+p2)/mpf(2)
            x1 = mp.log(p1)
            x2 = mp.log(p2)
            # x1 = p1
            # x2 = p2
            # x3 = (x2*y1 - x1*y2)/(y1-y2)
            # if x3 == x1 or x3 == x2:
            x3 = (x1 + x2) / mp.mpf(2)
            p3 = mp.exp(x3)
            # p3 = x3
            if p3 == p1 or p3 == p2:
                break
            y3 = eq(p3)
            if debug: print('--->', x1, x2, x3, p3, '->', y3)
            if mp.sign(y3) == mp.sign(y1):
                p1 = p3
            else:
                p2 = p3
        elif mp.sign(y1) * mp.sign(y2) == 0:
            if y1 == 0:
                sol = p1
            elif y2 == 0:
                sol = p2
            else:
                raise Exception('Strange: sign returns zero! without zeros $)')
            break
        else:
            raise Exception('Function has the same sign on both ends')
    if debug: print('Solution:', sol)
    return sol
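A usage sketch; both starting points must be positive, since the bisection step works on log(p), and tol = 0.1 is hard-coded above:

from mpmath import mp

root = my_secant(lambda p: mp.log(p) - 1, mp.mpf(1), mp.mpf(10))
print(root)   # stops near e ≈ 2.718 once |eq(p)| < 0.1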
Example n. 40
    def energy(self, clustering):
        energy = mpmath.mpf(0.0)
        new_vertex_distributions = _combine_vertex_distributions_given_clustering(
            self.vertex_distributions, clustering)

        # likelihood
        likelihood_energy = -self._log_likelihood(clustering, new_vertex_distributions)

        # prior on similarity:
        # We prefer the cluster whose minimum similarity is large.
        # - the similarity of a pair of vertexes is measured by the similarity
        #   of top 10 words in the distribution. (measure each word type
        #   respectively and take average)
        intra_cluster_energy = mpmath.mpf(0.0)
        for cluster_id, cluster_vertex_set in enumerate(clustering):
            min_similarity_within_cluster = self._min_similarity_within_cluster(cluster_vertex_set, new_vertex_distributions[cluster_id])
            intra_cluster_energy += -mpmath.log(mpmath.exp(min_similarity_within_cluster - 1))

        # Between cluster similarity:
        #  - For each pair of clusters, we want to find the pair of words with maximum similarity
        #    and prefer this similarity value to be small.
        inter_cluster_energy = mpmath.mpf(0.0)
        if len(clustering) > 1:
            for i in range(0, len(clustering)-1):
                for j in range(i+1, len(clustering)):
                    max_similarity_between_clusters = self._max_similarity_between_clusters(clustering[i], clustering[j])
                    inter_cluster_energy += -mpmath.log(mpmath.exp(-max_similarity_between_clusters))

        # prior on clustering complexity: prefer small number of clusters.
        length_energy = -mpmath.log(mpmath.exp(-len(clustering)))

        # classification: prefer small number of categories.
        class_energy = 0.0
        if self._classifier is not None:
            num_classes = self._calculate_num_of_categories(clustering, new_vertex_distributions)
            class_energy = -mpmath.log(mpmath.exp(-(abs(num_classes-len(clustering)))))

        # classification confidence: maximize the classification confidence
        confidence_energy = 0.0
        for cluster_id, cluster_vertex_set in enumerate(clustering):
            (category, confidence) = self._predict_label(new_vertex_distributions[cluster_id])
            confidence_energy += -mpmath.log(confidence)

        energy += (0.5)*likelihood_energy + intra_cluster_energy + inter_cluster_energy + 30.0*length_energy + 20.0*class_energy + confidence_energy
        logging.debug('ENERGY: {0:12.6f}\t{1:12.6f}\t{2:12.6f}\t{3:12.6f}\t{4:12.6f}\t{5:12.6f}'.format(
            likelihood_energy.__float__(),
            intra_cluster_energy.__float__(),
            inter_cluster_energy.__float__(),
            length_energy.__float__(),
            class_energy.__float__(),
            confidence_energy.__float__()))
        return energy
Example n. 41
def findGaussianChangePoint( data, gammatable ):
	N = len( data )
	if N<6 : return None # can't find a cp in data this small

	# the denominator. This is the easy part.
	denom = (pi**1.5) * mpf(( N*data.var() ))**( -N/2.0 + 0.5 ) * gammatable[N]

	# BEGIN weight calculation
	# the numerator. A little trickier.
	weights=[0,0,0] # the change cannot have occurred in the last 3 points
	data2=data**2

	#initialize
	dataA=data[0:3] ; dataA2=data2[0:3] ; NA = len(dataA)
	dataB=data[3:] ; dataB2=data2[3:] ;  NB = len(dataB)
	sumA=dataA.sum() ; sumsqA=dataA2.sum()
	sumB=dataB.sum()  ; sumsqB=dataB2.sum()

	# first data point--this could be done in the loop but it's okay here
	meanA=sumA/NA ; meansumsqA = sumsqA/NA ; meanA2 = meanA**2 ; sA2=meansumsqA-meanA2
	meanB=sumB/NB ; meansumsqB = sumsqB/NB ; meanB2 = meanB**2 ; sB2=meansumsqB-meanB2

	wnumf1 = mpf(NA)**(-0.5*NA + 0.5 ) * mpf(sA2)**(-0.5*NA + 1) * gammatable[NA]
	wnumf2 = mpf(NB)**(-0.5*NB + 0.5 ) * mpf(sB2)**(-0.5*NB + 1) * gammatable[NB]
	wdenom = (sA2 + sB2) * (meanA2*meanB2)
	weights.append( (wnumf1*wnumf2)/wdenom ) 

	for i in range( 3, N-3 ):
		NA += 1	; NB -= 1
		next = data[i]
		sumA += next	; sumB -= next
		nextsq = data2[i]
		sumsqA += nextsq; sumsqB -= nextsq
		meanA=sumA/NA ; meansumsqA = sumsqA/NA ; meanA2 = meanA**2 ; sA2=meansumsqA-meanA2
		meanB=sumB/NB ; meansumsqB = sumsqB/NB ; meanB2 = meanB**2 ; sB2=meansumsqB-meanB2
		wnumf1 = mpf(NA)**(-0.5*NA + 0.5 ) * mpf(sA2)**(-0.5*NA + 1) * gammatable[NA]
		wnumf2 = mpf(NB)**(-0.5*NB + 0.5 ) * mpf(sB2)**(-0.5*NB + 1) * gammatable[NB]
		wdenom = (sA2 + sB2) * (meanA2*meanB2)
		weights.append( (wnumf1*wnumf2)/wdenom) 
	weights.extend( [0,0] ) # the change cannot have occurred at the last 2 points
	weights=array(weights)
	# END weight calculation

	num = 2.0**2.5 * abs(data.mean()) * weights.mean()
	logodds = log( num ) - log( denom ) 	
	print "num:", num, "log num:", log(num), "| denom:", denom, "log denom:", log(denom), "|| log odds:", logodds 
	
	# If there is a change point, then logodds will be greater than 0
	if logodds < 0 : return None
	return ( weights.argmax(), logodds ) 
Example n. 42
    def L_function(self):
        p = self.p
        q = self.q
        z = self.z
        PiI = mpmath.pi * 1j

        val= (
              myDilog(z)
            + (mpmath.log(z) + p * PiI) * ( mpmath.log(1 - z) + q * PiI) / 2
            - mpmath.pi ** 2 / 6)

        if self.sign == -1:
            return -val
        else:
            return val
Example n. 43
    def lnlhood(self, param):
        """ This is the function that evaluates the likelihood at each point in NDIM space

        Parameters
        ----------
        param :

        Returns
        -------

        """
        likeit=0

        #loop over all the ions
        for ii in range(self.nions):

            #parity check : make sure data and models are aligned
            if(self.data[ii][0] != self.mod_colm_tag[ii]):
                raise ValueError('Mismatch between observables and models. This is a big mistake!!!')

            #now call the interpolator for models given current ion
            mod_columns=self.interpol[ii](param)

            #check if upper limit
            if(self.data[ii][3] == -1):
                #integrate the upper limit of a Gaussian - cumulative distribution
                arg=((self.data[ii][1]-mod_columns)/(np.sqrt(2)*self.data[ii][2]))[0]
                thislike=mmath.log(0.5+0.5*mmath.erf(arg))
                likeit=likeit+float(thislike)
                #print self.data[ii][0], float(thislike), self.data[ii][1], mod_columns

            #check if lower limit
            elif(self.data[ii][3] == -2):

                #integrate the lower limit of a Gaussian - Q function
                arg=((self.data[ii][1]-mod_columns)/(np.sqrt(2)*self.data[ii][2]))[0]
                thislike=mmath.log(0.5-0.5*mmath.erf(arg))
                likeit=likeit+float(thislike)
                #print self.data[ii][0], float(thislike), self.data[ii][1], mod_columns

            #if value, just eval Gaussian
            else:

                #add the likelihood for this ion
                thislike=-1*np.log(np.sqrt(2*np.pi)*self.data[ii][2])-(self.data[ii][1]-mod_columns)**2/(2*self.data[ii][2]**2)
                likeit=likeit+thislike

        return likeit
Example n. 44
def getInvertedBits( n ):
    value = real_int( n )

    # determine how many groups of bits we will be looking at
    if value == 0:
        groupings = 1
    else:
        groupings = int( fadd( floor( fdiv( ( log( value, 2 ) ), g.bitwiseGroupSize ) ), 1 ) )

    placeValue = mpmathify( 1 << g.bitwiseGroupSize )
    multiplier = mpmathify( 1 )
    remaining = value

    result = mpmathify( 0 )

    for i in range( 0, groupings ):
        # Let's let Python do the actual inverting
        group = fmod( ~int( fmod( remaining, placeValue ) ), placeValue )

        result += fmul( group, multiplier )

        remaining = floor( fdiv( remaining, placeValue ) )
        multiplier = fmul( multiplier, placeValue )

    return result
Example n. 45
def kummer_log(a,b,x):
    ## First try using the function from the library.
    ## If it is 0 or inf then we try our own implementation with logs.
    ## If it does not converge, then we return None !!
    a = float(a); b = float(b); x = float(x)
    
#    f_scipy = scipy_hyp1f1(a,b,x)
    f_mpmath = mpmath.hyp1f1(a,b,x)
    f_log = float(mpmath.log(f_mpmath))
    if (np.isinf(f_log) == True):
#        warnings.warn("hyp1f1() is 'inf', trying log version,  (a,b,x) = (%f,%f,%f)" %(a,b,x),UserWarning, stacklevel=2)
        f_log = kummer_own_log(a,b,x)
#        print f_log
        
    elif(f_mpmath == 0):
#        warnings.warn("hyp1f1() is '0', trying log version, (a,b,x) = (%f,%f,%f)" %(a,b,x),UserWarning, stacklevel=2)
        raise RuntimeError('Kummer function is 0. Kappa = %f', "Kummer_is_0", x)
#        f_log = kummer_own_log(a,b,x)  # TODO: We cannot do negative x, the functions is in log
    else:
#        f_log = np.log(f_scipy)
        f_log = f_log
#        print (a,b,x)
#        print f_log
        
    f_log = float(f_log)
    return f_log
def run_gth_solve(A, verbose=0):
    """
    gth_solve returns a stochastic vector x such that x A = 0
    for an irreducible transition rate matrix A.
    """
    if verbose > 1:
        print("original matrix (gth_solve):\n", A)

    x = mp.gth_solve(A)

    if verbose > 1:
        print("x\n", x)

    eps = mp.exp(0.8 * mp.log(mp.eps))  # test_eigen.py

    # x is a solution to x A = 0
    err0 = mp.norm(x*A, p=1)
    if verbose > 0:
        print("|xA| (gth_solve):", err0)
    assert err0 < eps

    # x is a nonnegative vector
    if verbose > 0:
        print("min(x) (gth_solve):", min(x))
    assert min(x) >= 0 - eps

    # 1-norm of x is one
    err1 = mp.fabs(mp.norm(x, p=1) - 1)
    if verbose > 0:
        print("||x| - 1| (gth_solve):", err1)
    assert err1 < eps
def run_stoch_eig(P, verbose=0):
    """
    stoch_eig returns a stochastic vector x such that x P = x
    for an irreducible stochastic matrix P.
    """
    if verbose > 1:
        print("original matrix (stoch_eig):\n", P)

    x = mp.stoch_eig(P)

    if verbose > 1:
        print("x\n", x)

    eps = mp.exp(0.8 * mp.log(mp.eps))  # From test_eigen.py

    # x is a left eigenvector of P with eigenvalue unity
    err0 = mp.norm(x*P-x, p=1)
    if verbose > 0:
        print("|xP - x| (stoch_eig):", err0)
    assert err0 < eps

    # x is a nonnegative vector
    if verbose > 0:
        print("min(x) (stoch_eig):", min(x))
    assert min(x) >= 0 - eps

    # 1-norm of x is one
    err1 = mp.fabs(mp.norm(x, p=1) - 1)
    if verbose > 0:
        print("||x| - 1| (stoch_eig):", err1)
    assert err1 < eps
Example n. 48
def base3int(x):
    x = int(x)
    exponents = range(int(log(x, 3)), -1, -1)
    for e in exponents:
        d = int(x // (3 ** e))
        x -= d * (3 ** e)
        yield d
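For example (assuming log here is mpmath's, as elsewhere on this page), 10 decimal is 101 in base 3:

print(''.join(str(d) for d in base3int(10)))   # -> 101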
Esempio n. 49
0
def gauss_warp_arb(X, l1, l2, lw, x0):
    r"""Warps the `X` coordinate with a Gaussian-shaped divot.

    .. math::

        l = l_1 - (l_1 - l_2) \exp\left ( -4\ln 2\frac{(X-x_0)^2}{l_{w}^{2}} \right )

    Parameters
    ----------
    X : :py:class:`Array`, (`M`,) or scalar float
        `M` locations to evaluate length scale at.
    l1 : positive float
        Global value of the length scale.
    l2 : positive float
        Pedestal value of the length scale.
    lw : positive float
        Width of the dip.
    x0 : float
        Location of the center of the dip in length scale.

    Returns
    -------
    l : :py:class:`Array`, (`M`,) or scalar float
        The value of the length scale at the specified point.
    """
    if isinstance(X, scipy.ndarray):
        if isinstance(X, scipy.matrix):
            X = scipy.asarray(X, dtype=float)
        return l1 - (l1 - l2) * scipy.exp(-4.0 * scipy.log(2.0) * (X - x0)**2.0 / (lw**2.0))
    else:
        return l1 - (l1 - l2) * mpmath.exp(-4.0 * mpmath.log(2.0) * (X - x0)**2.0 / (lw**2.0))
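At the dip center the returned length scale is exactly l2, and it relaxes back to l1 far away; a scalar-path sketch:

print(gauss_warp_arb(0.0, 1.0, 0.1, 0.5, 0.0))    # 0.1: bottom of the dip
print(gauss_warp_arb(10.0, 1.0, 0.1, 0.5, 0.0))   # ~1.0: far from the dip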
def makeMeasOneDot_lognorm(func, xdot, b:list, c:dict, Ve=[]):
    """
        MPMATH+CAPABLE

    :param func: vector function
    :param expplan: experiment plan (list of values of the vector x)
    :param b: vector b
    :param c: vector c
    :param Ve: covariance matrix (np.array)
    :param n: sample size of y
    :param outfilename: name of the output file to write the plan to
    :param listOfOutvars: list of exported variables
    :return: list of experimental data as a list of dicts 'x':..., 'y':...
    """

    y=func(xdot,b,c) # returns an mpm.matrix
    if y is None: # if the function returned garbage, do not record it in measdata!
        return None


    # Add perturbations:
    if Ve is not None:

        ydisps=np.diag(Ve)

        for k in range(len(y)):
                #y[k]=random.normalvariate(y[k], math.sqrt(ydisps[k]))

                y[k]=math.exp(  random.normalvariate(np.longdouble ( mpm.log(y[k])),   math.sqrt(ydisps[k])))


    return y
Example n. 51
def test_beta():
    np.random.seed(1234)

    b = np.r_[np.logspace(-200, 200, 4),
              np.logspace(-10, 10, 4),
              np.logspace(-1, 1, 4),
              -1, -2.3, -3, -100.3, -10003.4]
    a = b

    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T

    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 400

        assert_func_equal(sc.beta,
                          lambda a, b: float(mpmath.beta(a, b)),
                          ab,
                          vectorized=False,
                          rtol=1e-10)

        assert_func_equal(
            sc.betaln,
            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
            ab,
            vectorized=False,
            rtol=1e-10)
    finally:
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
Example n. 52
    def log_likelihood(self, document):
        log_likelihood = mpmath.mpf(0.0)
        for word_type in WORD_TYPES:
            for word_id in document.word_ids[word_type]:
                if word_id in self._vertex_distribution[word_type]:
                    log_likelihood += mpmath.log(self._vertex_distribution[word_type][word_id] + 1e-100)
        return log_likelihood
Example n. 53
def compute_MI_origemcee(seq_matQ,seq_matR,batches,ematQ,ematR,gamma,R_0):
    # preliminaries
    n_seqs = len(batches)
    n_batches = int(batches.max()) + 1 # assumes zero indexed batches
    n_bins = 1000
    
    #energies = sp.zeros(n_seqs)
    f = sp.zeros((n_batches,n_seqs))
    
    # compute energies
    # for i in range(n_seqs):
    #     energies[i] = sp.sum(seqs[:,:,i]*emat)
    # alternate way
    energies = np.zeros(n_seqs)
    for i in range(n_seqs):
        RNAP = (seq_matQ[:,:,i]*ematQ).sum()
        TF = (seq_matR[:,:,i]*ematR).sum() + R_0
        energies[i] = -RNAP + mp.log(1 + mp.exp(-TF - gamma)) - mp.log(1 + mp.exp(-TF))


    # sort energies
    inds = sp.argsort(energies)
    for i,ind in enumerate(inds):
        f[batches[ind],i] = 1.0/n_seqs # batches aren't zero indexed
        

    # bin and convolve with Gaussian
    f_binned = sp.zeros((n_batches,n_bins))
    
    for i in range(n_batches):
        f_binned[i,:] = sp.histogram(f[i,:].nonzero()[0],bins=n_bins,range=(0,n_seqs))[0]
    #f_binned = f_binned/f_binned.sum()
    f_reg = sp.ndimage.gaussian_filter1d(f_binned,0.04*n_bins,axis=1)
    f_reg = f_reg/f_reg.sum()

    # compute marginal probabilities
    p_b = sp.sum(f_reg,axis=1)
    p_s = sp.sum(f_reg,axis=0)

    # finally sum to compute the MI
    MI = 0
    for i in range(n_batches):
        for j in range(n_bins):
            if f_reg[i,j] != 0:
                MI = MI + f_reg[i,j]*sp.log2(f_reg[i,j]/(p_b[i]*p_s[j]))
    print(MI)
    return MI,f_reg
Example n. 54
    def volume(self):
        z = self.z
        val = (mpmath.arg(1 - z) * mpmath.log(abs(z))
               + myDilog(z).imag)
        if self.sign == -1:
            return -val
        else:
            return val
Example n. 55
def get_prob_poisson(events, length, rate):
    """ P(k, lambda = t * rate) = """
    avg_events = mpmath.fmul(rate, length) # lambda
    prob = mpmath.fmul((-1), avg_events)
    for i in range(1, events + 1):
        prob = mpmath.fadd(prob, mpmath.log(mpmath.fdiv(avg_events, i)))
    prob = mpmath.exp(prob)
    return prob
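For example, with rate 1.5 over an interval of length 2 (so lambda = 3), the probability of exactly 3 events:

import mpmath

p = get_prob_poisson(3, 2.0, 1.5)
print(p)   # exp(-3) * 3**3 / 3! = 4.5 * exp(-3) ≈ 0.224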
def test_stoch_eig_fp():
    P = mp.fp.matrix([[0.9 , 0.075, 0.025],
                      [0.15, 0.8  , 0.05 ],
                      [0.25, 0.25 , 0.5  ]])
    x_expected = mp.fp.matrix([[0.625, 0.3125, 0.0625]])
    x = mp.fp.stoch_eig(P)
    eps = mp.exp(0.8 * mp.log(mp.eps))  # test_eigen.py
    err0 = mp.norm(x-x_expected, p=1)
    assert err0 < eps
def test_gth_solve_fp():
    P = mp.fp.matrix([[-0.1, 0.075, 0.025],
                      [0.15, -0.2 , 0.05 ],
                      [0.25, 0.25 , -0.5 ]])
    x_expected = mp.fp.matrix([[0.625, 0.3125, 0.0625]])
    x = mp.fp.gth_solve(P)
    eps = mp.exp(0.8 * mp.log(mp.eps))  # test_eigen.py
    err0 = mp.norm(x-x_expected, p=1)
    assert err0 < eps