def concentration_analytic(x, t):
	# series solution; the diffusion coefficient d is a module-level constant
	N = 30  # upper limit for the sum
	c = 0
	d1 = 2*np.sqrt(d*t)
	for i in range(N):
		c += erfc((1-x+2*i) / d1) - erfc((1+x+2*i) / d1)
	return c
def concentration_gradient(x, t):
    # lam, D, and beta are module-level constants in the source project
    exact = (lam/(D*beta)) * (
        np.exp(-beta*x)
        - 0.5*np.exp(-beta*x)*erfc((2.0*beta*D*t - x)/np.sqrt(4.0*D*t))
        - 0.5*np.exp(beta*x)*erfc((2.0*beta*D*t + x)/np.sqrt(4.0*D*t))
    )
    return exact
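Both functions above read module-level constants (d, D, beta, lam) from their source projects. A minimal driving sketch with placeholder values (the constants below are assumptions, not values from the original):

import numpy as np
from scipy.special import erfc

d = D = 1.0e-2        # assumed diffusion coefficients (placeholders)
beta, lam = 0.5, 1.0  # assumed decay constant and source strength (placeholders)

x = np.linspace(0.0, 1.0, 11)
print(concentration_analytic(x, t=1.0))
print(concentration_gradient(x, t=1.0))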
Example 3
def expected(x, t):
    # 1-D advection-dispersion analytic solution (flux-type inlet boundary)
    porosity = 0.3
    alphal = 0.2
    v = 1.05e-3 / porosity
    D = alphal * v
    return (0.5 * erfc((x - v*t) / (2*np.sqrt(D*t)))
            + np.sqrt(v*v*t / (np.pi*D)) * np.exp(-(x - v*t)**2 / (4*D*t))
            - 0.5 * (1 + v*x/D + v*v*t/D) * np.exp(v*x/D)
            * erfc((x + v*t) / (2*np.sqrt(D*t))))
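expected is self-contained apart from np and erfc, so it can be probed directly; a quick look at the breakthrough curve at a fixed position (values are illustrative only):

import numpy as np
from scipy.special import erfc

x_obs = 1.0  # observation point [m] (assumed)
for t in (3600.0, 7200.0, 14400.0):
    print(t, expected(x_obs, t))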
def analytic(x, t):
    D = 1
    N = 30  # upper limit for the sum
    c = 0
    d = 2*np.sqrt(D*t)
    for i in range(N):
        c += erfc((1-x+2*i) / d) - erfc((1+x+2*i) / d)
    return c
def Csq(x, sig, a):
    # smoothed initial square profile of half-width a, normalized to its value at x[0]
    A = (x + a)/(np.sqrt(2.0)*sig)
    Am = (x - a)/(np.sqrt(2.0)*sig)
    csq = erfc(Am) - erfc(A)
    return csq/csq[0]
def endf(c, x):          # error function to fit end of tubes
    # c = [amplitude, center, width, offset]; a negative offset is clamped to 0
    offset = max(c[3], 0)
    if c[0] >= 0:
        return c[0]*erfc((c[1] - x)/c[2]) + offset
    # negative amplitude: add -2*c[0] so the step still rises from the baseline
    return -2*c[0] + c[0]*erfc((c[1] - x)/c[2]) + offset
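endf takes its parameters as a single sequence c = [amplitude, center, width, offset], which suits a residual-based fitter. A hedged sketch of fitting it with scipy.optimize.least_squares on synthetic data (all values illustrative):

import numpy as np
from scipy.special import erfc
from scipy.optimize import least_squares

x = np.linspace(0.0, 10.0, 200)
y = 1.5*erfc((4.0 - x)/0.8) + 0.2                        # synthetic tube-end profile
y += 0.05*np.random.default_rng(0).normal(size=x.size)   # measurement noise

res = least_squares(lambda c: endf(c, x) - y, x0=[1.0, 5.0, 1.0, 0.1])
print(res.x)  # recovered [amplitude, center, width, offset]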
Example 7
def I1(Smax):
    A1 = (Ni/2.)*((G/alpha/V)**0.5)
    A2 = Smax
    C1 = erfc(upart)
    C2 = 0.5*((sgi/Smax)**2)
    C3a = np.exp(9.*(log_sig**2)/2.)
    C3b = erfc(upart + 3.*log_sig/np.sqrt(2.))
    return A1*A2*(C1 - C2*C3a*C3b)
Example 8
def _probit_inplace(x):
    '''
    Probit of every element of the given array, calculated in-place
    '''
    x *= -OneOverSqrtTwo
    fns.erfc(x, out=x)
    x /= 2

    return x
def transient_temp_1D(z,t,G,vz,kappa):
    # Calculate T separately for case where t = 0 to avoid divide-by-zero warnings
    if t == 0:
        T = G * z
    else:
        T = G * (z + vz * t) + (G / 2.0) * ((z - vz * t) * np.exp(-(vz * z) / kappa) *\
        erfc((z - vz * t) / (2.0 * np.sqrt(kappa * t))) - (z + vz * t) * \
        erfc((z + vz * t) / (2.0 * np.sqrt(kappa * t))))
    return T
Example 10
def pair_fn(i, j, d, delta):
    energy = 0.0
    # Dipole-Dipole (only term for this test)
    fac1 = erfc(alpha*d) + 2.0*alpha*d/np.sqrt(np.pi)*np.exp(-alpha**2*d**2)
    fac2 = 3.0*erfc(alpha*d) + 4.0*alpha**3*d**3/np.sqrt(np.pi)*np.exp(-alpha**2*d**2) \
           + 6.0*alpha*d/np.sqrt(np.pi)*np.exp(-alpha**2*d**2)
    energy += np.dot(pair_pot.dipoles[i,:], pair_pot.dipoles[j,:])*fac1/d**3 - \
              1.0*np.dot(pair_pot.dipoles[i,:], delta)*np.dot(delta, pair_pot.dipoles[j,:])*fac2/d**5
    return energy
Example 11
def radford_peak(x, x0, sigma, a, hstep, htail, tau, bg0, bg1):
    bg_term = bg0 + x*bg1
    
    step = a * hstep * erfc( (x - x0)/(sigma * np.sqrt(2)) )
    
    le_tail = a * htail * erfc( (x - x0)/(sigma * np.sqrt(2))  + sigma/(tau*np.sqrt(2)) ) * np.exp( (x-x0)/tau ) / (2 * tau * np.exp( -(sigma / (np.sqrt(2)*tau))**2 ))
    
    gauss_term = gaussian(x, x0, sigma, a, htail)

    return  gauss_term+bg_term + step + le_tail
def theoreticalQAM(M, EbN0dB):
    '''
    Return log10 of the theoretical symbol error rate for M-QAM
    (square constellation, M = 2**k) at each Eb/N0 value in EbN0dB.
    '''
    from math import log
    from scipy import special
    k = 1/((2/3.0)*(M-1))**.5
    return [log(2*(1 - 1/M**(.5))*special.erfc(k*(10**(eb/10))**.5) -
                (1 - 2/M**.5 + 1.0/M)*(special.erfc(k*(10**(eb/10))**.5))**2)/log(10)
            for eb in EbN0dB]
def gaussian_pulse_shaping_filter(BT, sps, T):
	# NOTE: the sps and T arguments are overridden below, so only BT has an effect
	T = 0.05
	sps = 18
	Ts = T/sps
	t = np.arange(-2*T, 2*T, Ts)
	B = BT/T
	alpha = (2*np.pi*B)/np.sqrt(np.log(2))
	gauss = (1/(2*T))*(0.5*special.erfc((alpha*(2*t-(T/2)))/np.sqrt(2)) - 0.5*special.erfc((alpha*(2*t+(T/2)))/np.sqrt(2)))
	K = (np.pi/2)/np.sum(gauss)  # normalize so the taps sum to pi/2
	y = K * gauss
	return y
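A quick call to inspect the impulse response (note the body overrides sps and T, so only BT matters):

import numpy as np
from scipy import special

h = gaussian_pulse_shaping_filter(BT=0.3, sps=18, T=0.05)
print(len(h), np.sum(h))  # tap count and normalized sum (pi/2)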
Example 14
def g2c_direct(t, c1, l1, l2, d):
    sig = 0.3
    c2 = -1
    a = c1*0.5*np.exp(-l1*(t-d) +
        0.5*(sig**2.0)*(l1**2.0))*(erfc(-(t-d)/np.sqrt(2.0*sig**2.0) + l1*(sig)/np.sqrt(2.0)))

    b = c2*0.5*np.exp(l2*(t-d) +
        0.5*(sig**2.0)*(l2**2.0))*(erfc((t-d)/np.sqrt(2.0*sig**2.0) + l2*(sig)/np.sqrt(2.0)))

    y = 1 + a + b
    return y
Example 15
def F(self, z):  # function describing how photo-z errors soften bin edges
    if self.sigz0 == 0:  # tophat
        width = (self.zmaxnom - self.zminnom)/2.
        sig = width*self.sharpness
        result = 0.5*(np.tanh((z - self.zminnom)/sig) + np.tanh((self.zmaxnom - z)/sig))
    else:  # inside full z range, finite z uncertainty
        sigzroot2 = self.sigz(z)*np.sqrt(2.)
        lowerpart = .5*erfc((self.zminnom - z)/sigzroot2)
        upperpart = -.5*erfc((self.zmaxnom - z)/sigzroot2)
        result = lowerpart + upperpart
    return result
Example 16
def diffusion_analytic(t, h, V0, dy, viscosity):
    y = np.arange(0, h + dy, dy)
    eta1 = h / (2 * (t * viscosity)**0.5)
    eta = y / (2 * (t * viscosity)**0.5)
    sum1 = 0
    sum2 = 0
    for n in range(0, 1000):
        sum1 = sum1 + erfc(2 * n * eta1 + eta)
        sum2 = sum2 + erfc(2 * (n + 1) * eta1 - eta)
    V_analytic = V0 * (sum1 - sum2)
    return V_analytic
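A short driving example (parameter values are placeholders): the velocity profile across a gap of height h shortly after the wall at y = 0 starts moving with speed V0.

import numpy as np
from scipy.special import erfc

V = diffusion_analytic(t=0.5, h=1.0, V0=1.0, dy=0.1, viscosity=0.01)
print(V)  # velocity at y = 0, 0.1, ..., 1.0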
Example 17
def erfc(x):
    ''' erfc(x), x is ADVar or scalar '''
    if not isinstance(x, ADVar):
        return special.erfc(x)

    r = ADVar()
    r.val = special.erfc(float(x))
    tmp = -2./math.sqrt(math.pi) * math.exp(-float(x)*float(x))

    for i, dx in x.deriv:
        r.deriv.append((i, tmp*dx))
    return r
	def gaussian_pulse_shaping_filter(self,BT,T,ts):
		#T = 0.05
		sps = 18
		ts = T/sps
#		t = np.arange(-T,T,ts)
		t = np.linspace(-T,T,num=27*3)
		B = BT/T
		alpha = (2*np.pi*B)/np.sqrt(np.log(2))
		gauss = (1/(2*T))*(0.5*special.erfc((alpha*(2*t-(T/2)))/np.sqrt(2)) - 0.5*special.erfc((alpha*(2*t+(T/2)))/np.sqrt(2)))
		K = (np.pi/2)/np.sum(gauss)
		y = K * gauss
		return y
Example 19
def hypermet(x, amplitude=1.0, center=0., sigma=1.0, step=0, tail=0, gamma=0.1):
    """
    hypermet function to simulate XRF peaks and/or Compton Scatter Peak

    Arguments
    ---------
      x          array of ordinate (energy) values
      amplitude  overall scale factor
      center     peak centroid
      sigma      Gaussian sigma
      step       step parameter for low-x erfc step [0]
      tail       amplitude of tail function         [0]
      gamma      slope of tail function             [0.1]


    Notes
    -----
    The function is given by (with s2 = sqrt(2), s2pi = sqrt(2*pi), and
    error checking for small values of sigma and gamma):

        arg  = (x - center)/sigma
        gauss = (1.0/(s2pi*sigma)) * exp(-arg**2 /2)
        sfunc = step * max(gauss) * erfc(arg/2.0) / 2.0
        tfunc = tail * exp((x-center)/(gamma*sigma)) * erfc(arg/s2 + 1.0/gamma)
        hypermet = amplitude * (gauss + sfunc + tfunc) / 2.0

    This follows the definitions given in
        ED-XRF SPECTRUM EVALUATION AND QUANTITATIVE ANALYSIS
        USING MULTIVARIATE AND NONLINEAR TECHNIQUES
        P. Van Espen, P. Lemberge
        JCPDS-International Centre for Diffraction Data 2000,
        Advances in X-ray Analysis,Vol.43 560

    But is modified slightly to better preserve area with changing tail and gamma

    """
    sigma = max(1.e-8, sigma)
    gamma = max(1.e-8, gamma)
    arg   = (x - center)/sigma
    arg[where(arg>100)] = 100.0
    arg[where(arg<-100)] = -100.0
    gscale = s2pi*sigma
    gauss = (1.0/gscale) * exp(-arg**2 / 2.0)
    sfunc = step * erfc(arg/2.0) / (2.0*gscale)

    targ = (x-center)/(gamma*sigma)
    targ[where(targ>100)] = 100.0
    targ[where(targ<-100)] = -100.0

    tfunc = exp(targ) * erfc(arg/2.0 + 1.0/gamma)
    tfunc = tail*tfunc / (max(tfunc)*gscale)
    return amplitude * (gauss + sfunc + tfunc) /2.0
Example 20
def F(self, n, z):  # function describing how photo-z errors soften bin edges
    minedge = self.zedges[n]
    maxedge = self.zedges[n+1]

    if self.sigz0 == 0:
        result = smoothedtophat(z, minedge, maxedge)  # float((z>=minedge)*(z<maxedge)) tophat fn
    else:  # inside full z range, finite z uncertainty
        sigzroot2 = self.sigz(z)*np.sqrt(2.)
        lowerpart = .5*erfc((minedge - z)/sigzroot2)
        upperpart = -.5*erfc((maxedge - z)/sigzroot2)
        result = lowerpart + upperpart
    return result
Example 21
def difffull(x1, x2, t, C0, C1, D):
    """
    Simple equation for the diffusion into a semi-infinite slab, see Crank 1975
    C0 is the concentration in the core
    C1 is the concentration at the border
    D is the diffusion coefficient in log10 unit, m^2.s^-1
    x1 and x2 are the profile lengths from beginning and end respectively, in meters
    t is the time in seconds
    """
    # diffusion in from both ends of the profile
    Cx = (C1 - C0) * (erfc(x1 / (2. * np.sqrt((10**D)*t))) + erfc(x2 / (2. * np.sqrt((10**D)*t)))) + C0

    return Cx
Example 22
def hypermet(x, amplitude, center, sigma, step=0, tail=0, gamma=0.1):
    """
    hypermet function to simulate XRF peaks and/or Compton Scatter Peak

    Arguments
    ---------
      x          array of ordinate (energy) values
      amplitude  overall scale factor
      center     peak centroid
      sigma      Gaussian sigma
      step       step parameter for low-x erfc step [0]
      tail       amplitude of tail function         [0]
      gamma      slope of tail function             [0.1]


    Notes
    -----
    The function is given by (with s2 = sqrt(2), s2pi = sqrt(2*pi), and
    error checking for small values of sigma and gamma):

        arg  = (x - center)/sigma
        gaus = exp(-arg**2/2.0) / (s2pi*sigma)
        step = step * erfc(arg/s2) / (2*center)
        tail = tail * exp(arg/gamma) * erfc(arg/s2 + 1.0/(s2*gamma))
        tail = tail / (2*sigma*gamma*exp(-1.0/(2*gamma**2)))

        hypermet = amplitude * (gaus + step + tail)

    This follows the definitions given in
        ED-XRF SPECTRUM EVALUATION AND QUANTITATIVE ANALYSIS
        USING MULTIVARIATE AND NONLINEAR TECHNIQUES
        P. Van Espen, P. Lemberge
        JCPDS-International Centre for Diffraction Data 2000,
        Advances in X-ray Analysis,Vol.43 560

    """

    sigma = max(1.e-8, sigma)
    gamma = max(0.1, gamma)
    arg   = (x - center)/sigma

    gaus = exp(-arg**2/2.0) / (s2pi*sigma)

    step = step * erfc(arg/s2) / (2*center)

    tail = tail * exp(arg/gamma) * erfc(arg/s2 + 1.0/(s2*gamma))
    tail = tail / (2*sigma*gamma*exp(-1.0/(2*gamma**2)))

    return amplitude * (gaus + step + tail)
Example 23
def _lerfc(self, t):
    ''' numerically safe implementation of f(t) = log(1-erf(t)) = log(erfc(t))'''
    from scipy.special import erfc
    f = np.zeros_like(t)
    tmin = 20; tmax = 25
    ok = t < tmin                             # log(1-erf(t)) is safe to evaluate
    bd = t > tmax                             # evaluate tight bound
    nok = np.logical_not(ok)
    interp = np.logical_and(nok, np.logical_not(bd))  # interpolate between both of them
    f[nok] = np.log(old_div(2, np.sqrt(np.pi))) - t[nok]**2 - np.log(t[nok] + np.sqrt(t[nok]**2 + old_div(4, np.pi)))
    lam = old_div(1, (1 + np.exp(12*(0.5 - old_div((t[interp] - tmin), (tmax - tmin))))))  # interp. weights
    f[interp] = lam*f[interp] + (1 - lam)*np.log(erfc(t[interp]))
    f[ok] += np.log(erfc(t[ok]))              # safe eval
    return f
Example 24
def stefan_parameter(λ, **kw):
    """ Objective function to solve transcendental equation for the stefan
    parameter λ """

    Lf = kw.get("Lf", 1.0)
    cpl = kw.get("cpl", 10.0)
    cps = kw.get("cps", 1.0)
    ρl = kw.get("rhol", 1.0)
    ρs = kw.get("rhos", 1.0)
    κl = kw.get("kapl", 1.0)
    κs = kw.get("kaps", 1.0)
    Tl = kw.get("Tl", 1.0)
    Tm = kw.get("Tm", 0.0)
    Ts = kw.get("Ts", -1.0)

    αl = κl / (ρl * cpl)
    αs = κs / (ρs * cps)
    ν = sqrt(αl / αs)
    Stl = cpl * (Tl-Tm) / Lf
    Sts = cps * (Tm-Ts) / Lf

    res = Stl / (exp(λ*λ) * erf(λ)) \
        - Sts / (ν * exp(ν*ν*λ*λ) * erfc(ν*λ)) \
        - sqrt(π)*λ

    return res**2
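stefan_parameter returns the squared residual of the transcendental Stefan condition, so λ can be recovered by minimizing it. A sketch with scipy.optimize.minimize_scalar, supplying the math names (sqrt, exp, erf, erfc, π) the snippet expects to find in scope:

from numpy import sqrt, exp, pi as π
from scipy.special import erf, erfc
from scipy.optimize import minimize_scalar

opt = minimize_scalar(stefan_parameter, bounds=(1e-3, 5.0), method="bounded")
print("lambda =", opt.x)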
Example 25
    def computeObservationPValue(self, siteObs):
        """
        Compute a p-value on the observation of a kinetic event
        """

        # p-value of detection -- FIXME needs much more thought here!
        # p-value computation (slightly robustified Gaussian model)
        #  emf - rms fractional error of background model
        #  em - rms error of background model = um * emf
        #  um - predicted mean of unmodified ipd from model
        #  uo - (trimmed) observed mean ipd
        #  eo - (trimmed) standard error of observed mean (std / sqrt(coverage))
        #  Null model is ~N(um, em^2 + eo^2)
        #  Then compute standard gaussian p-value = erfc((uo-um) / sqrt(2 * (em^2 + eo^2))) / 2
        # FIXME? -- right now we only detect the case where the ipd gets longer.

        um = siteObs['modelPrediction']

        # FIXME -- pipe through model error
        em = 0.1 * um
        # em = model.fractionalModelError * em

        uo = siteObs['tMean']
        eo = siteObs['tErr']

        pvalue = erfc((uo - um) / sqrt(2 * (em ** 2 + eo ** 2))) / 2
        return pvalue.item()
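The formula itself is easy to exercise standalone; a worked instance with made-up numbers for the model prediction, observed mean, and standard error:

from numpy import sqrt
from scipy.special import erfc

um, uo, eo = 1.0, 1.4, 0.05  # model mean, observed mean, observed std-err (made up)
em = 0.1 * um                # assumed model rms error, as in the snippet
print(erfc((uo - um) / sqrt(2 * (em**2 + eo**2))) / 2)  # one-sided p-value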
Example 26
def I(t_):
    y = exp(-b*(t - t_))/(a**2 + 8*D*(t - t_))*\
        exp(-2*r**2/(a**2 + 8*D*(t - t_)))*\
        exp(-alpha*z + alpha**2*D*(t - t_))*\
        spec.erfc((2*D*alpha*(t - t_) - z)/(sqrt(4*D*(t - t_))))

    return y
    def _tTest(x, y, exclude=95):
        """Compute a one-sided Welsh t-statistic."""
        with np.errstate(all="ignore"):
            def cappedSlog(v):
                q = np.percentile(v, exclude)
                v2 = v.copy()
                v2 = v2[~np.isnan(v2)]
                v2[v2 > q] = q
                v2[v2 <= 0] = 1. / (75 + 1)
                return np.log(v2)
            x1 = cappedSlog(x)
            x2 = cappedSlog(y)
            sx1 = np.var(x1) / len(x1)
            sx2 = np.var(x2) / len(x2)
            totalSE = np.sqrt(sx1 + sx2)
            if totalSE == 0:
                stat = 0
            else:
                stat = (np.mean(x1) - np.mean(x2)) / totalSE

            #df   = (sx1 + sx2)**2 / (sx1**2/(len(x1)-1) + sx2**2/(len(x2) - 1))
            #pval = 1 - scidist.t.cdf(stat, df)

            # Scipy's t distribution CDF implementaton has inadequate
            # precision.  We have switched to the normal distribution for
            # better behaved p values.
            pval = 0.5 * erfc(stat / sqrt(2))

            return {'testStatistic': stat, 'pvalue': pval}
Example 28
def __test_ks(self, x):
    x = x[np.invert(np.isnan(x))]
    n = len(x)
    x = np.sort(x)
    # Get cumulative sums
    yCDF = np.array(np.arange(n) + 1) / float(n)
    # Remove duplicates; only need final one with total count
    notdup = np.append(np.diff(x), 1) > 0
    x_expcdf = x[notdup]
    y_expcdf = np.append(0, yCDF[notdup])

    # The theoretical CDF (theocdf) is assumed to be normal
    # with unknown mean and sigma

    zScores = (x_expcdf - np.mean(x))/np.std(x, ddof=1)

    mu = 0
    sigma = 1
    theocdf = 0.5 * erfc(-(zScores - mu)/(np.sqrt(2)*sigma))

    delta1 = y_expcdf[0:len(y_expcdf)-1] - theocdf  # Vertical difference at jumps approaching from the LEFT.
    delta2 = y_expcdf[1:len(y_expcdf)] - theocdf    # Vertical difference at jumps approaching from the RIGHT.
    deltacdf = np.abs(np.concatenate((delta1, delta2), axis=0))

    return np.max(deltacdf)
Example 29
    def get_gamma(self, a=None):
        """
        Return the gamma correlation matrix, i.e. phi(i) = gamma(i, j)*q(j)
        """
        if a is not None:
            self.update(a)

        self.timer.start('get_gamma')

        nat = len(self.a)

        il, jl, dl, nl = get_neighbors(self.a, self.cutoff)

        if il is None:
            G = None
        else:
            G = np.zeros([nat, nat], dtype=float)
            if self.cutoff is None:
                for i, j, d in zip(il, jl, dl):
                    G[i, j] += 1.0/d
            else:
                for i, j, d in zip(il, jl, dl):
                    G[i, j] += 1.0*erfc(d/self.cutoff)/d

        self.timer.stop('get_gamma')
        return G
Example 30
def reflection_factor_spherical_wave(impedance, angle, distance, wavenumber):
    r"""
    Spherical wave reflection factor :math:`Q`.

    :param impedance: Normalized impedance :math:`Z`.
    :param angle: Angle of incidence :math:`\theta`.
    :param distance: Path length of the reflected ray :math:`r`.
    :param wavenumber: Wavenumber :math:`k`.

    The spherical wave reflection factor :math:`Q` is given by

    .. math:: Q = R \left(1 - R \right) F

    where :math:`R` is the plane wave reflection factor as calculated in :func:`reflection_factor_plane_wave` and :math:`F` is given by

    .. math:: F = 1 - j \sqrt{ \pi} w e^{-w^2} \mathrm{erfc} \left( j w \right)

    where :math:`w` is the numerical distance as calculated in :func:`numerical_distance`.
    """
    w = numerical_distance(impedance, angle, distance, wavenumber)

    F = 1.0 - 1j * np.sqrt(np.pi) * w * np.exp(-w**2.0) * erfc(1j * w)

    plane_factor = reflection_factor_plane_wave(impedance, angle)
    return plane_factor * (1.0 - plane_factor) * F

def brnoisecalc(lowP, highP):
    return lowP.n * cmuxnoisecalc(highP, highP.α)


# https://tches.iacr.org/index.php/TCHES/article/view/8793
def iknoisecalc(lowP, highP, funcP):
    return highP.n * (2**(-2 * (funcP.basebit * funcP.t) +
                          1)) + funcP.t * highP.n * (lowP.α**2)


def gbnoisecalc(lowP, highP, funcP):
    return brnoisecalc(lowP, highP) + iknoisecalc(lowP, highP, funcP)


# FA parameter

d = 1600
m = 9

fanoise = d * cmuxnoisecalc(lvl1param, lvl1param.α)
print(fanoise)
print(erfc(1 / (2 * 2 * 2 * np.sqrt(2 * fanoise))))
print(
    np.prod([(1 - mpfr(
        erfc(1 /
             (2 * 2 * 2 *
              np.sqrt(2 *
                      (i + 1) * cmuxnoisecalc(lvl1param, lvl1param.α))))))**(
                          m * lvl1param.n) for i in range(d)]))
def rv_posterior_predictive_distribution_credible_regions(
        model_like, df, tmin, tmax, Ntimes=200, Nsample=1000, **kwargs):
    r"""
    Compute credible regions of the posterior predictive distribution of the RV
    signal using a radvel model, a dataframe of posterior samples, and a range
    of times.
    
    Arguments
    ---------
    model_like : radvel.likelihood
        Likelihood used for the forward-modeling of the RV signal.
    df : pandas.DataFrame
        Dataframe containing posterior sample of 
        parameters taken by model_like
    tmin : float
        Minimum time of time range for computing RV signal predictive posterior
    tmax : float
        Maximum time of time range for computing RV signal predictive posterior
    Ntimes : int, optional
        Number of times to sample between tmin and tmax.
        Default value is 200.
    Nsample : int, optional
        Number of posterior samples to generate RV signals for.
        Default value is 1000.

    Other Arguments
    ---------------
    levels : array-like, optional
        Credible region levels to return.
        Default values correspond to 1,2, and 3$\sigma$
    full_sample : bool, optional
        Return the underlying sample of RV signals in addition to credible regions.
        Default is False.

    Returns
    -------
    time : ndarray (N,)
        Time values of posterior predictive distribution values.
    lower : ndarray (M,N)
        Lower values bounding the credible regions given by 'levels' of the posterior
        predictive distribution.
        Shape is (M,N) where M is the number of credible regions and N is the number
        of times.
    upper : ndarray (M,N)
        Upper values bounding the credible regions given by 'levels' of the posterior
        predictive distribution.
        Shape is (M,N) where M is the number of credible regions and N is the number
        of times.
    normalized_residual_info : ndarray (3,Nobs)
        Information on the normalized residuals of the fit to the observations.
        Contains the median and ±1sigma values of the normalized residuals for
        each observation point.
    sample : ndarray, (N,Nsample), optional
        If 'full_sample' is True, contains the full sample of the posterior predictive
        distribution used to compute the credible regions.
    """
    levels = kwargs.pop('levels',
                        np.array([erf(n / np.sqrt(2)) for n in range(1, 4)]))
    full_sample = kwargs.pop('full_sample', False)
    rv_out = np.zeros((Nsample, Ntimes))
    Nobs = len(model_like.x)
    normalized_resids = np.zeros((Nobs, Nsample))
    times = np.linspace(tmin, tmax, Ntimes)
    for k in range(Nsample):
        rv_out[k] = np.inf
        # Avoid unstable posterior points
        while np.any(np.isinf(rv_out[k])):
            i = np.random.randint(0, len(df))  # high end is exclusive
            pars = df.iloc[i]
            vpars = pars[model_like.list_vary_params()]
            model_like.set_vary_params(vpars)
            rv_out[k] = model_like.model(times)
            ebs = model_like.errorbars()
            normalized_resids[:, k] = model_like.residuals() / ebs
    lo, hi = [], []
    for lvl in levels:
        hi.append(np.quantile(rv_out.T, 0.5 + lvl / 2, axis=1))
        lo.append(np.quantile(rv_out.T, 0.5 - lvl / 2, axis=1))
    normalized_residual_quantiles = np.quantile(normalized_resids,
                                                (0.5, erf(1), erfc(1)),
                                                axis=1)
    if full_sample:
        return times, lo, hi, normalized_residual_quantiles, rv_out
    return times, lo, hi, normalized_residual_quantiles
Example 33
def emg(mu, amp, sigma, tau):
    # exponentially modified Gaussian; the abscissa x is read from the enclosing scope
    return amp \
           * np.exp(0.5 * (2.0 * mu + sigma**2.0 / tau - 2.0 * x) / tau) \
           * erfc((mu + sigma**2.0 / tau - x) / (2.0**0.5 * sigma))
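A sketch that supplies the closed-over grid explicitly (grid and parameter values are assumptions):

import numpy as np
from scipy.special import erfc

x = np.linspace(-5.0, 15.0, 401)              # grid the closure reads
y = emg(mu=2.0, amp=1.0, sigma=0.5, tau=1.5)  # exponentially modified Gaussian
print(float(y.max()))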
Example 34
def acetylAnalytical(x, telapsed):
    z = x / np.sqrt(4 * D0 * telapsed)
    acetyl = 1 - np.exp(-p0 * arate * telapsed * (
        (1 + 2 *
         (z**2)) * scis.erfc(z) - 2 * z * np.exp(-(z**2)) / np.sqrt(np.pi)))
    return acetyl
Example 35
def df(x):
    return _k * _special.erfc(x) - _np.exp(-x**2)
Example 36
def mback(energy,
          mu,
          group=None,
          order=3,
          z=None,
          edge='K',
          e0=None,
          emin=None,
          emax=None,
          whiteline=None,
          leexiang=False,
          tables='chantler',
          fit_erfc=False,
          return_f1=False,
          _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:    arrays of energy and mu(E)
      order:         order of polynomial [3]
      group:         output group (and input group for e0)
      z:             Z number of absorber
      edge:          absorption edge (K, L3)
      e0:            edge energy
      emin:          beginning energy for fit
      emax:          ending energy for fit
      whiteline:     exclusion zone around white lines
      leexiang:      flag to use the Lee & Xiang extension
      tables:        'chantler' (default) or 'cl'
      fit_erfc:      True to float parameters of error function
      return_f1:     True to put the f1 array in the group

    Returns:
      group.f2:      tabulated f2(E)
      group.f1:      tabulated f1(E) (if return_f1 is True)
      group.fpp:     matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1: order = 1  # set order of polynomial
    if order > MAXORDER: order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None: i1 = index_of(energy, emin)
    if emax is not None: i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
    if edge.lower().startswith('l'):
        l2 = xray_edge(z, 'L2', _larch=_larch)[0]
        l2_pre = 1.0 * (energy < l2)
        l2_post = 1.0 * (energy > l2 + float(whiteline))
        theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1: group.f1 = f1

    n = edge
    if edge.lower().startswith('l'): n = 'L'
    params = Group(
        s=Parameter(1, vary=True, _larch=_larch),  # scale of data
        xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
        em=Parameter(xray_line(z, n, _larch=_larch)[0],
                     vary=False,
                     _larch=_larch),  # erfc centroid
        e0=Parameter(e0, vary=False, _larch=_larch),  # abs. edge energy
        ## various arrays need by the objective function
        en=energy,
        mu=mu,
        f2=group.f2,
        weight=weight,
        theta=theta,
        leexiang=leexiang,
        _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)  # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value * erfc(
        (energy - params.em.value) / params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(
                getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s * mu - normalization_function
    group.mback_params = params
Example 37
def approxLognormal(N,
                    mu=0.0,
                    sigma=1.0,
                    tail_N=0,
                    tail_bound=[0.02, 0.98],
                    tail_order=np.e):
    '''
    Construct a discrete approximation to a lognormal distribution with underlying
    normal distribution N(mu,sigma).  Makes an equiprobable distribution by
    default, but user can optionally request augmented tails with exponentially
    sized point masses.  This can improve solution accuracy in some models.
    
    Parameters
    ----------
    N: int
        Number of discrete points in the "main part" of the approximation.
    mu: float
        Mean of underlying normal distribution.
    sigma: float
        Standard deviation of underlying normal distribution.
    tail_N: int
        Number of points in each "tail part" of the approximation; 0 = no tail.
    tail_bound: [float]
        CDF boundaries of the tails vs main portion; tail_bound[0] is the lower
        tail bound, tail_bound[1] is the upper tail bound.  Inoperative when
        tail_N = 0.  Can make "one tailed" approximations with 0.0 or 1.0.
    tail_order: float
        Factor by which consecutive point masses in a "tail part" differ in
        probability.  Should be >= 1 for sensible spacing.
        
    Returns
    -------
    pmf: np.ndarray
        Probabilities for discrete probability mass function.
    X: np.ndarray
        Discrete values in probability mass function.
        
    Written by Luca Gerotto
    Based on Matlab function "setup_workspace.m," from Chris Carroll's
      [Solution Methods for Microeconomic Dynamic Optimization Problems]
      (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
    Latest update: 11 February 2017 by Matthew N. White
    '''
    # Find the CDF boundaries of each segment
    if sigma > 0.0:
        if tail_N > 0:
            lo_cut = tail_bound[0]
            hi_cut = tail_bound[1]
        else:
            lo_cut = 0.0
            hi_cut = 1.0
        inner_size = hi_cut - lo_cut
        inner_CDF_vals = [
            lo_cut + x * N**(-1.0) * inner_size for x in range(1, N)
        ]
        if inner_size < 1.0:
            scale = 1.0 / tail_order
            mag = (1.0 - scale**tail_N) / (1.0 - scale)
        lower_CDF_vals = [0.0]
        if lo_cut > 0.0:
            for x in range(tail_N - 1, -1, -1):
                lower_CDF_vals.append(lower_CDF_vals[-1] +
                                      lo_cut * scale**x / mag)
        upper_CDF_vals = [hi_cut]
        if hi_cut < 1.0:
            for x in range(tail_N):
                upper_CDF_vals.append(upper_CDF_vals[-1] +
                                      (1.0 - hi_cut) * scale**x / mag)
        CDF_vals = lower_CDF_vals + inner_CDF_vals + upper_CDF_vals
        temp_cutoffs = list(
            stats.lognorm.ppf(CDF_vals[1:-1], s=sigma, loc=0,
                              scale=np.exp(mu)))
        cutoffs = [0] + temp_cutoffs + [np.inf]
        CDF_vals = np.array(CDF_vals)

        # Construct the discrete approximation by finding the average value within each segment
        K = CDF_vals.size - 1  # number of points in approximation
        pmf = CDF_vals[1:(K + 1)] - CDF_vals[0:K]
        X = np.zeros(K)
        for i in range(K):
            zBot = cutoffs[i]
            zTop = cutoffs[i + 1]
            tempBot = (mu + sigma**2 - np.log(zBot)) / (np.sqrt(2) * sigma)
            tempTop = (mu + sigma**2 - np.log(zTop)) / (np.sqrt(2) * sigma)
            if tempBot <= 4:
                X[i] = -0.5 * np.exp(mu + (sigma**2) * 0.5) * (
                    erf(tempTop) - erf(tempBot)) / pmf[i]
            else:
                X[i] = -0.5 * np.exp(mu + (sigma**2) * 0.5) * (
                    erfc(tempBot) - erfc(tempTop)) / pmf[i]
    else:
        pmf = np.ones(N) / N
        X = np.exp(mu) * np.ones(N)
    return [pmf, X]
Example 38
if OP == 1:
    l = np.exp(-alp * xauxf**pf)
    L = np.diag(l)
elif OP == 2:
    l = (np.pi * xauxf)**(-1) * np.sin(np.pi * xauxf)
    l[0] = 1.0
    L = np.diag(l)
elif OP == 3:
    l = (1 + np.cos(np.pi * xauxf)) / 2
    L = np.diag(l)
elif OP == 4:
    l = (1 + np.cos(np.pi * xauxf)) / 2
    l = l**4 * (35 - 84 * l + 70 * l**2 - 20 * l**3)
    L = np.diag(l)
elif OP == 5:
    #l = 0.5*scs.erfc(2*pf**0.5*(np.abs(xauxf) - 0.5)*np.sqrt((-np.log(1-4*(xauxf-0.5)**2))/(4*(xauxf-0.5)**2)))
    l = 0.5 * scs.erfc(2 * pf**0.5 * (np.abs(xauxf) - 0.5))
    L = np.diag(l)
elif OP == 6:
    l = 0.5 * scs.erfc(2 * pf**0.5 * (np.abs(xauxf) - 0.5) * np.sqrt(
        (-np.log(1 - 4 * (xauxf - 0.5)**2)) / (4 * (xauxf - 0.5)**2)))
    if (np.mod(i, 2) == 0):
        l[int(i / 2)] = 0.5
    L = np.diag(l)

#what if L is the identity
# L = np.identity(j)
# L[i,i] = 0
# L[i-1,i-1] = 0

# M = inv(B.T@W@B)@B.T@W; let's verify the properties of B@W in all the possibilities
WP1 = W1 @ P1
Example 39
def logpdf(x, mu, sigma):
    xmin = np.min(x)
    F = lambda x: (sp.erfc((np.log(x) - mu) / (np.sqrt(2) * sigma))) / 2
    g = lambda x: F(x) - F(x + 1)
    h = -np.log(F(xmin)) + np.log(g(x))
    return h
def pVIC(x, A, alpha, beta, R, gamma, sigma, k, xp):
    """
    The pseudo-Voigt-Ikeda-Carpenter function is the convolution of a pseudo-Voigt
    profile with the Ikeda-Carpenter function. 
    See Nuclear Instruments and Methods in Physics Research, A239 (1985) 536-544
    and the "time of flight" computation behind the FullProf software, explained
    at http://www.ccp14.ac.uk/ccp/web-mirrors/plotr/Tutorials&Documents/TOF_FullProf.pdf

    Parameters
    ----------
    x : Numerical value (float)
        Independent 'pVIC' function variable, which is the position on the x axis.
    A : Float
        Amplitude of the pseudo-Voigt-Ikeda-Carpenter function.
    alpha : Float
        IC (Ikeda-Carpenter) fast decay parameter.
    beta : Float
        IC slow decay parameter.
    R : Float
        IC weight ratio between fast and slow decays.
    gamma : Float
        pV (pseudo-Voigt) Lorentzian width.
    sigma : Float
        pV Gaussian width.
    k : Float
        IC "approximation" parameter (not sure what it does).
        Its value is set to 0.05 by default.
    xp : Float
        Position of peak (not max).

    Returns
    -------
    TYPE: float
        Value of the pseudo-Voigt-Ikeda-Carpenter function at position x in reciprocal space.

    """
    xr = x - xp  # position of current datapoint in reciprocal space, relative to peak position

    def pseudoVoigtFWHM(gamma, sigma):
        fL = 2 * gamma
        fG = 2 * sigma * np.sqrt(2 * np.log(2))
        Gamma = (fG**5 + 2.69269*fG**4*fL + 2.42843*fG**3*fL**2 \
            + 4.47163*fG**2*fL**3 + 0.07842*fG*fL**4 + fL**5)**(1/5)
        return Gamma

    def pseudoVoigtEta(gamma, sigma):
        fL = 2 * gamma
        f = pseudoVoigtFWHM(gamma, sigma)
        fLbyf = fL / f  # note: because of this line, one cannot have both gamma and sigma set to zero
        eta = 1.36603 * fLbyf - 0.47719 * fLbyf**2 + 0.11116 * fLbyf**3
        return eta

    Gamma = pseudoVoigtFWHM(gamma, sigma)
    eta = pseudoVoigtEta(gamma, sigma)

    sigmaSq = Gamma**2 / (8 * np.log(2))
    gWidth = np.sqrt(2 * sigmaSq)

    am = alpha * (1 - k)
    ap = alpha * (1 + k)

    x = am - beta
    y = alpha - beta
    z = ap - beta

    zs = -alpha * xr + 1j * alpha * Gamma / 2
    zu = (1 - k) * zs
    zv = (1 + k) * zs
    zr = -beta * xr + 1j * beta * Gamma / 2

    u = am * (am * sigmaSq - 2 * xr) / 2
    v = ap * (ap * sigmaSq - 2 * xr) / 2
    s = alpha * (alpha * sigmaSq - 2 * xr) / 2
    r = beta * (beta * sigmaSq - 2 * xr) / 2

    n = (1 / 4) * alpha * (1 - k**2) / k**2
    nu = 1 - R * am / x
    nv = 1 - R * ap / z
    ns = -2 * (1 - R * alpha / y)
    nr = 2 * R * alpha**2 * beta * k**2 / (x * y * z)

    yu = (am * sigmaSq - xr) / gWidth
    yv = (ap * sigmaSq - xr) / gWidth
    ys = (alpha * sigmaSq - xr) / gWidth
    yr = (beta * sigmaSq - xr) / gWidth

    val = A*n*((1 - eta)*(nu*np.exp(u)*erfc(yu) + nv*np.exp(v)*erfc(yv)\
        + ns*np.exp(s)*erfc(ys) + nr*np.exp(r)*erfc(yr))\
        - eta*2/np.pi*(np.imag(nu*exp1(zu)*np.exp(zu)) + np.imag(nv*exp1(zv)*np.exp(zv))\
        + np.imag(ns*exp1(zs)*np.exp(zs)) + np.imag(nr*exp1(zr)*np.exp(zr))))

    return val
Example 41
plt.title("Constellation diagram, with SNR = 6")
plt.scatter(real1, Y1, c='b')
plt.axis([-10, 10, -20, 20])
plt.show()
plt.grid(True)
plt.title("Constellation diagram, with SNR = 6")
plt.scatter(real2, Y2, color='g')
plt.axis([-10, 10, -20, 20])
plt.show()
# question 1e
Yax = [0] * 16
Xax = [0] * 16
SNR = np.linspace(0, 16, 16 * 5000)
Ber_th = [0] * 80000
for i in range(80000):
    Ber_th[i] = 0.5 * special.erfc(math.sqrt(10**(i * 0.0002 / 10)))
for snr in range(16):
    Xax[snr] = snr
    n0 = (A**2) * Tb / 10**(snr / 10)
    correct = 0
    for i in range(100000):
        bit = random.randint(0, 1)
        if bit == 1:
            bit_tr = A
        else:
            bit_tr = -A
        x1 = np.random.normal(0, math.sqrt(n0 / 2))
        x2 = np.random.normal(0, math.sqrt(n0 / 2))
        bit_noise = bit_tr * math.sqrt(Tb) + x1
        if bit_tr * bit_noise >= 0:  # if the transmitted and the noisy signal have the same sign,
            correct += 1  # the noise does not corrupt the demodulation
Example 42
def EfficInROIOpt(b):
    roiedge = ROIOpt(b) / 2.
    return 1. - spec.erfc(2.35 * roiedge / (numpy.sqrt(2.)))
Example 43
def gauss(x, s, mu):
    result = 1 - special.erfc((x - mu) / s) / 2
    return result
Example 44
def P_model_erf(T, P0, B, T0, sigma):
    return B*(T-T0)*0.5*erfc((T-T0)/(np.sqrt(2)*sigma)) \
             - B*sigma/(np.sqrt(2*np.pi))*np.exp(-(T-T0)**2/(2*sigma**2))+P0
Example 45
def W(r,Ion=0.1):
  k = np.sqrt(Ion) / 3.08 #Ion=0.1 is default
  x = k * r / np.sqrt(6)
  return 332.286 * np.sqrt(6 / np.pi) * (1 - np.sqrt(np.pi) * x * np.exp(x ** 2) * erfc(x)) / (e * r)
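W reads a dielectric constant e from module scope; a hedged call with a water-like value (the constant is an assumption, not from the original):

import numpy as np
from scipy.special import erfc

e = 78.5  # assumed dielectric constant (placeholder)
print(W(5.0))            # screened interaction at r = 5 with default Ion = 0.1
print(W(5.0, Ion=0.15))  # higher ionic strength, stronger screening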
Example 46
    def approx(self, N, tail_N=0, tail_bound=[0.02, 0.98], tail_order=np.e):
        '''
        Construct a discrete approximation to a lognormal distribution with underlying
        normal distribution N(mu,sigma).  Makes an equiprobable distribution by
        default, but user can optionally request augmented tails with exponentially
        sized point masses.  This can improve solution accuracy in some models.

        Parameters
        ----------
        N: int
            Number of discrete points in the "main part" of the approximation.
        tail_N: int
            Number of points in each "tail part" of the approximation; 0 = no tail.
        tail_bound: [float]
            CDF boundaries of the tails vs main portion; tail_bound[0] is the lower
            tail bound, tail_bound[1] is the upper tail bound.  Inoperative when
            tail_N = 0.  Can make "one tailed" approximations with 0.0 or 1.0.
        tail_order: float
            Factor by which consecutive point masses in a "tail part" differ in
            probability.  Should be >= 1 for sensible spacing.

        Returns
        -------
        d : DiscreteDistribution
            Probability associated with each point in array of discrete
            points for discrete probability mass function.
        '''
        # Find the CDF boundaries of each segment
        if self.sigma > 0.0:
            if tail_N > 0:
                lo_cut = tail_bound[0]
                hi_cut = tail_bound[1]
            else:
                lo_cut = 0.0
                hi_cut = 1.0
            inner_size = hi_cut - lo_cut
            inner_CDF_vals = [
                lo_cut + x * N**(-1.0) * inner_size for x in range(1, N)
            ]
            if inner_size < 1.0:
                scale = 1.0 / tail_order
                mag = (1.0 - scale**tail_N) / (1.0 - scale)
            lower_CDF_vals = [0.0]
            if lo_cut > 0.0:
                for x in range(tail_N - 1, -1, -1):
                    lower_CDF_vals.append(lower_CDF_vals[-1] +
                                          lo_cut * scale**x / mag)
            upper_CDF_vals = [hi_cut]
            if hi_cut < 1.0:
                for x in range(tail_N):
                    upper_CDF_vals.append(upper_CDF_vals[-1] +
                                          (1.0 - hi_cut) * scale**x / mag)
            CDF_vals = lower_CDF_vals + inner_CDF_vals + upper_CDF_vals
            temp_cutoffs = list(
                stats.lognorm.ppf(CDF_vals[1:-1],
                                  s=self.sigma,
                                  loc=0,
                                  scale=np.exp(self.mu)))
            cutoffs = [0] + temp_cutoffs + [np.inf]
            CDF_vals = np.array(CDF_vals)

            # Construct the discrete approximation by finding the average value within each segment.
            # This codeblock ignores warnings because it throws a "divide by zero encountered in log"
            # warning due to computing erf(infty) at the tail boundary.  This is irrelevant and
            # apparently freaks new users out.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                K = CDF_vals.size - 1  # number of points in approximation
                pmf = CDF_vals[1:(K + 1)] - CDF_vals[0:K]
                X = np.zeros(K)
                for i in range(K):
                    zBot = cutoffs[i]
                    zTop = cutoffs[i + 1]
                    tempBot = (self.mu + self.sigma**2 -
                               np.log(zBot)) / (np.sqrt(2) * self.sigma)
                    tempTop = (self.mu + self.sigma**2 -
                               np.log(zTop)) / (np.sqrt(2) * self.sigma)
                    if tempBot <= 4:
                        X[i] = -0.5 * np.exp(self.mu +
                                             (self.sigma**2) * 0.5) * (
                                                 erf(tempTop) -
                                                 erf(tempBot)) / pmf[i]
                    else:
                        X[i] = -0.5 * np.exp(self.mu +
                                             (self.sigma**2) * 0.5) * (
                                                 erfc(tempBot) -
                                                 erfc(tempTop)) / pmf[i]

        else:
            pmf = np.ones(N) / N
            X = np.exp(self.mu) * np.ones(N)
        return DiscreteDistribution(pmf, X)
Example 47
print(("Potential softened below", H, "kpc and truncated above", r_s, "kpc"))

####################################################################

r = np.logspace(np.log10(e_plummer) - 1.2, np.log10(box_size) + 0.2, 10000)

# Newtonian gravity
f_newton = 1 / r**2

# Simulated gravity
u = r / H
u = u[u <= 1]

W_swift = 21.0 * u**6 - 90.0 * u**5 + 140.0 * u**4 - 84.0 * u**3 + 14.0 * u
f_swift = f_newton * (special.erfc(0.5 * r / r_s) +
                      (1.0 / math.sqrt(math.pi)) *
                      (r / r_s) * np.exp(-0.25 * (r / r_s)**2))
f_swift[r <= H] = W_swift / H**2
f_swift[r > r_cut] = 0

W_gadget = u * (21.333333 - 48 * u + 38.4 * u**2 - 10.6666667 * u**3 -
                0.06666667 * u**-3)
W_gadget[u < 0.5] = u[u < 0.5] * (10.666667 + u[u < 0.5]**2 *
                                  (32.0 * u[u < 0.5] - 38.4))
f_gadget = f_newton * (special.erfc(0.5 * r / r_s) +
                       (1.0 / math.sqrt(math.pi)) *
                       (r / r_s) * np.exp(-0.25 * (r / r_s)**2))
f_gadget[r <= H] = W_gadget / H**2
f_gadget[r > r_cut] = 0
Example 48
def gaussianCDF(x, amplitude, mu, sigma):
    """
    CDF of gaussian is P(X<=x) = .5 erfc((mu-x)/(sqrt(2)sig))
    """
    return 0.5 * amplitude * erfc((mu - x) / (np.sqrt(2) * sigma))
Example 49
def err_psk_8(EsN0, coh):
    q = np.sqrt(2. * EsN0)
    ps = sp.erfc(q / np.sqrt(2.) * np.sin(np.pi / 8.))
    pb = 1. / 3. * ps  # Gray code
    return ps, pb
Example 50
def densityAnalytical(x, telapsed):
    z = x / np.sqrt(4 * D0 * (telapsed))
    return scis.erfc(z)
Example 51
    def approx(self, N, tail_N=0, tail_bound=None, tail_order=np.e):
        '''
        Construct a discrete approximation to a lognormal distribution with underlying
        normal distribution N(mu,sigma).  Makes an equiprobable distribution by
        default, but user can optionally request augmented tails with exponentially
        sized point masses.  This can improve solution accuracy in some models.

        Parameters
        ----------
        N: int
            Number of discrete points in the "main part" of the approximation.
        tail_N: int
            Number of points in each "tail part" of the approximation; 0 = no tail.
        tail_bound: [float]
            CDF boundaries of the tails vs main portion; tail_bound[0] is the lower
            tail bound, tail_bound[1] is the upper tail bound.  Inoperative when
            tail_N = 0.  Can make "one tailed" approximations with 0.0 or 1.0.
        tail_order: float
            Factor by which consecutive point masses in a "tail part" differ in
            probability.  Should be >= 1 for sensible spacing.

        Returns
        -------
        d : DiscreteDistribution
            Probability associated with each point in array of discrete
            points for discrete probability mass function.
        '''
        tail_bound = tail_bound if tail_bound is not None else [0.02, 0.98]
        # Find the CDF boundaries of each segment
        if self.sigma > 0.0:
            if tail_N > 0:
                lo_cut = tail_bound[0]
                hi_cut = tail_bound[1]
            else:
                lo_cut = 0.0
                hi_cut = 1.0
            inner_size = hi_cut - lo_cut
            inner_CDF_vals = [
                lo_cut + x * N**(-1.0) * inner_size for x in range(1, N)
            ]
            if inner_size < 1.0:
                scale = 1.0 / tail_order
                mag = (1.0 - scale**tail_N) / (1.0 - scale)
            lower_CDF_vals = [0.0]
            if lo_cut > 0.0:
                for x in range(tail_N - 1, -1, -1):
                    lower_CDF_vals.append(lower_CDF_vals[-1] +
                                          lo_cut * scale**x / mag)
            upper_CDF_vals = [hi_cut]
            if hi_cut < 1.0:
                for x in range(tail_N):
                    upper_CDF_vals.append(upper_CDF_vals[-1] +
                                          (1.0 - hi_cut) * scale**x / mag)
            CDF_vals = lower_CDF_vals + inner_CDF_vals + upper_CDF_vals
            temp_cutoffs = list(
                stats.lognorm.ppf(CDF_vals[1:-1],
                                  s=self.sigma,
                                  loc=0,
                                  scale=np.exp(self.mu)))
            cutoffs = [0] + temp_cutoffs + [np.inf]
            CDF_vals = np.array(CDF_vals)

            K = CDF_vals.size - 1  # number of points in approximation
            pmf = CDF_vals[1:(K + 1)] - CDF_vals[0:K]
            X = np.zeros(K)
            for i in range(K):
                zBot = cutoffs[i]
                zTop = cutoffs[i + 1]
                # Manual check to avoid the RuntimeWarning generated by "divide by zero"
                # with np.log(zBot).
                if zBot == 0:
                    tempBot = np.inf
                else:
                    tempBot = (self.mu + self.sigma**2 -
                               np.log(zBot)) / (np.sqrt(2) * self.sigma)
                tempTop = (self.mu + self.sigma**2 -
                           np.log(zTop)) / (np.sqrt(2) * self.sigma)
                if tempBot <= 4:
                    X[i] = -0.5 * np.exp(self.mu + (self.sigma**2) * 0.5) * (
                        erf(tempTop) - erf(tempBot)) / pmf[i]
                else:
                    X[i] = -0.5 * np.exp(self.mu + (self.sigma**2) * 0.5) * (
                        erfc(tempBot) - erfc(tempTop)) / pmf[i]

        else:
            pmf = np.ones(N) / N
            X = np.exp(self.mu) * np.ones(N)
        return DiscreteDistribution(pmf,
                                    X,
                                    seed=self.RNG.randint(0,
                                                          2**31 - 1,
                                                          dtype='int32'))
Example 52
    def build_fcoll_tab(self):
        """
        Build a lookup table for the halo mass function / collapsed fraction.
        
        Can be run in parallel.
        """

        self.logMmin_tab = self.pf['hmf_logMmin']
        self.logMmax_tab = self.pf['hmf_logMmax']
        self.zmin = self.pf['hmf_zmin']
        self.zmax = self.pf['hmf_zmax']
        self.dlogM = self.pf['hmf_dlogM']
        self.dz = self.pf['hmf_dz']

        self.Nz = int((self.zmax - self.zmin) / self.dz + 1)
        self.z = np.linspace(self.zmin, self.zmax, self.Nz)

        self.Nm = np.logspace(self.logMmin_tab, self.logMmax_tab,
                              self.dlogM).size

        if rank == 0:
            print "\nComputing %s mass function..." % self.hmf_func

        # Masses in hmf are in units of Msun * h
        self.M = self.MF.M / self.cosm.h70
        self.logM = np.log10(self.M)
        self.lnM = np.log(self.M)

        self.Nm = self.M.size

        self.dndm = np.zeros([self.Nz, self.Nm])
        self.mgtm = np.zeros_like(self.dndm)
        self.ngtm = np.zeros_like(self.dndm)
        fcoll_tab = np.zeros_like(self.dndm)

        pb = ProgressBar(len(self.z), 'fcoll')
        pb.start()

        for i, z in enumerate(self.z):

            if i > 0:
                self.MF.update(z=z)

            if i % size != rank:
                continue

            # Compute collapsed fraction
            if self.hmf_func == 'PS' and self.hmf_analytic:
                delta_c = self.MF.delta_c / self.MF.growth.growth_factor(z)
                fcoll_tab[i] = erfc(delta_c / sqrt2 / self.MF._sigma_0)

            else:

                # Has units of h**4 / cMpc**3 / Msun
                self.dndm[i] = self.MF.dndm.copy() * self.cosm.h70**4
                self.mgtm[i] = self.MF.rho_gtm.copy()
                self.ngtm[i] = self.MF.ngtm.copy() * self.cosm.h70**3

                # Remember that mgtm and mean_density have factors of h**2
                # so we're OK here dimensionally
                fcoll_tab[i] = self.mgtm[i] / self.cosm.mean_density0

            pb.update(i)

        pb.finish()

        # Collect results!
        if size > 1:
            tmp = np.zeros_like(fcoll_tab)
            nothing = MPI.COMM_WORLD.Allreduce(fcoll_tab, tmp)
            _fcoll_tab = tmp

            tmp2 = np.zeros_like(self.dndm)
            nothing = MPI.COMM_WORLD.Allreduce(self.dndm, tmp2)
            self.dndm = tmp2

            tmp3 = np.zeros_like(self.ngtm)
            nothing = MPI.COMM_WORLD.Allreduce(self.ngtm, tmp3)
            self.ngtm = tmp3
            #
            #tmp4 = np.zeros_like(self.mgtm)
            #nothing = MPI.COMM_WORLD.Allreduce(self.mgtm, tmp4)
            #self.mgtm = tmp4
        else:
            _fcoll_tab = fcoll_tab

        # Fix NaN elements
        _fcoll_tab[np.isnan(_fcoll_tab)] = 0.0
        self._fcoll_tab = _fcoll_tab
Example 53
def energy(hi, hj):
    X = hi * np.sign(hj) / Gamma / Q
    Ep = -Gamma * Gamma * np.log(eps +
                                 (1 - 2 * eps) * erfc(-X / sqrt2) / 2)
    return Ep
Example 54
        if (counter2 % 10 == 0):
            pTot[int(counter2 / 10)] = sum(p[1:width]) * dx
            aTot[int(counter2 / 10)] = sum(acetyl) * dx

            pTot2[int(counter2 / 10)] = sum(p_2[1:width] * dx)
            aTot2[int(counter2 / 10)] = sum(acetyl2) * dx

            z = x / np.sqrt(4 * D0 * (telapsed))

            pTotAnalytical[int(counter2 /
                               10)] = p0 * np.sqrt(4 * D0 * (telapsed) / np.pi)
            #pTotAnalytical[int(counter2/10)-1] = scii.quad(densityAnalytical,0,x[width-1],args=(telapsed),epsabs=1e-15)[0]
            #pTotAnalytical[int(counter2/10)-1] = sum(scis.erfc(z))
            acetylationfit = 1 - np.exp(-p0 * arate * telapsed * (
                (1 + 2 * (z**2)) * scis.erfc(z) -
                2 * z * np.exp(-(z**2)) / np.sqrt(np.pi)))
            aTotAnalytical[int(counter2 / 10)] = scii.quad(acetylAnalytical,
                                                           0,
                                                           x[width - 1],
                                                           args=(telapsed),
                                                           epsabs=1e-15)[0]
        counter2 += 1
        #p_2[0] = p0
    print((time.time() - tstart) / 60)
    pTotResidual[counter] = max(abs(pTot2 - pTotAnalytical))
    pTotSFDResidual[counter] = max(abs(pTot - pTotAnalytical))
    counter += 1
print(telapsed)
tend = time.time()
trun = (tend - tstart) / 60  #In minutes
Example 55
def H(x):
    return erfc(x/sqrt2)/2
def berawgn(EbN0):
    """ Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
    return 0.5 * erfc(math.sqrt(10**(float(EbN0) / 10)))
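A quick table of the theoretical BPSK bit error rate over a range of Eb/N0 values:

import math
from scipy.special import erfc

for ebn0 in range(0, 11, 2):
    print(ebn0, "dB ->", berawgn(ebn0))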
Example 57
def antenna_pattern_gauss(d_az,
                          d_el,
                          antenna_3dB,
                          db=False,
                          twoway=True,
                          az_conv=None,
                          el_conv=None,
                          units='rad'):
    """
    Get the antenna weighting factor due to the azimuth and elevation offsets
    from the main antenna direction. The weighting factor is meant in terms of
    power (not amplitude of the E-field). A Gaussian antenna pattern is
    assumed.

    Parameters
    ----------
    d_az : array
        Azimuth offsets to the main antenna axis [deg]
    d_el : array
        Elevation offset to the main antenna axis [deg]
    antenna_3dB : float
        Half power beam width [deg]
    db : bool (optional)
        If true return the result in dB instead linear.
    twoway: bool (optional)
        If true, return the two-way weighting factor instead of
        the one-way factor.
    az_conv (optional): float
        If set, assumes that the antenna moves in azimuth direction (PPI) and
        averages over the angle given by this keyword [deg].
    el_conv (optional): float
        If set, assumes that the antenna moves in elevation direction (RHI)
        and averages over the angle given by this keyword [deg].
    units (optional) : str
        Specify if inputs quantities are given in radians ( "rad" ) or
        degrees ( "deg" )

    Returns
    -------
    fa : array
        Weighting factor.
    """
    if az_conv is not None:
        if az_conv <= 0:
            az_conv = None
    if el_conv is not None:
        if el_conv <= 0:
            el_conv = None

    if units not in ['deg', 'rad']:
        print('Invalid units, must be either "rad" or "deg", ' +
              'assuming they are "rad"')

    if units == 'deg':
        # Convert all quantities to rad
        if az_conv is not None:
            az_conv *= np.pi / 180.
        if el_conv is not None:
            el_conv *= np.pi / 180.
        d_az = d_az.copy() * np.pi / 180.
        d_el = d_el.copy() * np.pi / 180.
        antenna_3dB *= np.pi / 180.

    if az_conv is None or el_conv is None:
        if az_conv is not None:
            """ The antenna is moving in azimuth direction and is averaging the
             received pulses over 'az_conv' deg. The norm azimuth position
             of the antenna is reached, when the antenna moved half of the
             azimuth distance (az_offset = 'az_conv'/2 deg).
             The weighting factor at the azimuth position 'daz' from the norm
             position is given by the following integral:

                                1    / daz+az_offset
               fa(daz, del) = ---- * |               f(daz) d(daz)   * f(del)
                              Norm   / daz-az_offset
             where
               daz : Is the azimuth deviation from the norm antenna position
               del : Is the elevation deviation from the norm antenna position
               fa(daz,del) : Weighting factor at point (daz,del) f(0,0) must be 1.
               Norm : Normalization such that f(0,0)=1
               f(x) : Weighting factor of the non moving antenna (Gaussian
                     function, see below)

             Solving the integral above leads to:

                             K1
             fa(daz, del) = ---- * ( erf(K*(daz+az_offset)) -erf(K*(daz-az_offset)) )  * f(del)
                            Norm
             where
                   2 * sqrt(ln(2))
               K = ---------------
                       phi3db

                    sqrt(PI)
               K1 = ---------
                      2 * K

               erf : the error function
               phi3db : the half power beam width
            """

            az_offset = az_conv / 2.
            K = 2. * np.sqrt(np.log(2)) / antenna_3dB
            K1 = np.sqrt(np.pi) / 2. / K
            Norm = 2.0 * K1 * erfc(K * az_offset)
            faz = (K1 / Norm *
                   (erfc(K * (d_az + az_offset)) - erfc(K *
                                                        (d_az - az_offset))))
        else:
            da = (2. * d_az / antenna_3dB)**2
            ind = da > 20.
            da[ind] = 20
            faz = np.exp(-da * np.log10(2))

        if el_conv is not None:
            # see explanation for az_conv above
            el_offset = el_conv / 2. * np.pi / 180.
            K = 2. * np.sqrt(np.log(2)) / antenna_3dB
            K1 = np.sqrt(np.pi) / 2. / K
            Norm = 2.0 * K1 * erfc(K * el_offset)
            fel = (K1 / Norm *
                   (erfc(K * (d_el + el_offset)) - erfc(K *
                                                        (d_el - el_offset))))
        else:
            de = (2. * d_el / antenna_3dB)**2
            ind = de > 20.
            de[ind] = 20
            fel = np.exp(-de * np.log10(2))

        fa = faz * fel
    else:
        # Gaussian antenna pattern:
        #
        # f(daz,del) = e^(-( (2*daz/phi3db_el)^2 + (2*del/phi3db_az)^2 ) * ln(2))
        #
        # from Gauss normal distribution N(x) with N(x)=1 and N(x=X0/2)=1/2 :
        #
        # N(x) = e^(-(2*x/X0)^2 * ln(2))

        da = 2. * d_az / antenna_3dB
        de = 2. * d_el / antenna_3dB
        dr = (da**2 + de**2)

        ind = dr > 20.
        dr[ind] = 20

        fa = np.exp(-dr * np.log(2))

    if twoway:
        fa = fa**2

    if db:
        fa = 10. * np.log10(fa)

    return fa
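A minimal call sketch for the default path (no azimuth/elevation averaging); arrays are required because the function indexes into its inputs, and all values here are illustrative:

import numpy as np

d_az = np.linspace(-2.0, 2.0, 5)  # azimuth offsets [deg]
d_el = np.zeros_like(d_az)        # on the elevation axis
print(antenna_pattern_gauss(d_az, d_el, antenna_3dB=1.0, units='deg'))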
Example 58
def fun(x):
    return 1-0.5*special.erfc(x/2)
Example 59
def ber_qpsk(Eb, n_0):
    '''
    Compute the theoretical BER for QPSK modulation
    '''
    if (n_0 == 0): return 0
    return 1 / 2 * erfc(np.sqrt(Eb / n_0))
Example 60
    def _calc(self, x, y):
        """
        List based implementation of binary tree algorithm for concordance
        measure after Christensen (2005).

        """
        x = np.array(x)
        y = np.array(y)
        n = len(y)
        perm = list(range(n))
        perm.sort(key=lambda a: (x[a], y[a]))
        vals = y[perm]
        ExtraY = 0
        ExtraX = 0
        ACount = 0
        BCount = 0
        CCount = 0
        DCount = 0
        ECount = 0
        Concordant = 0
        Discordant = 0
        # ids for left child
        li = [None] * (n - 1)
        # ids for right child
        ri = [None] * (n - 1)
        # number of left descendants for a node
        ld = np.zeros(n)
        # number of values equal to value i
        nequal = np.zeros(n)

        for i in range(1, n):
            NumBefore = 0
            NumEqual = 1
            root = 0
            x0 = x[perm[i - 1]]
            y0 = y[perm[i - 1]]
            x1 = x[perm[i]]
            y1 = y[perm[i]]
            if x0 != x1:
                DCount = 0
                ECount = 1
            else:
                if y0 == y1:
                    ECount += 1
                else:
                    DCount += ECount
                    ECount = 1
            root = 0
            inserting = True
            while inserting:
                current = y[perm[i]]
                if current > y[perm[root]]:
                    # right branch
                    NumBefore += 1 + ld[root] + nequal[root]
                    if ri[root] is None:
                        # insert as right child to root
                        ri[root] = i
                        inserting = False
                    else:
                        root = ri[root]
                elif current < y[perm[root]]:
                    # increment number of left descendants
                    ld[root] += 1
                    if li[root] is None:
                        # insert as left child to root
                        li[root] = i
                        inserting = False
                    else:
                        root = li[root]
                elif current == y[perm[root]]:
                    NumBefore += ld[root]
                    NumEqual += nequal[root] + 1
                    nequal[root] += 1
                    inserting = False

            ACount = NumBefore - DCount
            BCount = NumEqual - ECount
            CCount = i - (ACount + BCount + DCount + ECount - 1)
            ExtraY += DCount
            ExtraX += BCount
            Concordant += ACount
            Discordant += CCount

        cd = Concordant + Discordant
        num = Concordant - Discordant
        tau = num / np.sqrt((cd + ExtraX) * (cd + ExtraY))
        v = (4. * n + 10) / (9. * n * (n - 1))
        z = tau / np.sqrt(v)
        pval = erfc(np.abs(z) / 1.4142136)  # follow scipy
        return tau, pval, Concordant, Discordant, ExtraX, ExtraY