Example no. 1
def test_solve_poisson_becke_sa():
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 1e2, 500)
    r = rtf.get_radii()
    rhoy = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5*(-r/sigma)/sigma
    rho = CubicSpline(rhoy, rhod, rtf)
    v = solve_poisson_becke([rho])[0]

    s2s = np.sqrt(2)*sigma
    soly = erf(r/s2s)/r
    sold = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2

    if False:
        import matplotlib.pyplot as pt
        n = 10
        pt.clf()
        pt.plot(r[:n], soly[:n], label='exact')
        pt.plot(r[:n], v.y[:n], label='spline')
        pt.legend(loc=0)
        pt.savefig('denu.png')

    assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
    # Test the boundary condition at zero and infinity
    assert v.extrapolation.l == 0
    np.testing.assert_allclose(v.extrapolation.amp_left, np.sqrt(2/np.pi)/sigma)
    np.testing.assert_allclose(v.extrapolation.amp_right, 1.0)
Example no. 2
def sim(arr, amp=1.0, sigma=2.0e-9, eff=0.3, tau=1.420461e-7, kappa=1.0e-8, **kwargs):
    """ Approximate a realistic SSPALS spectra, f(t), where arr is an array of 't' (in seconds).

        Gaussian(V_0, sigma) implantation time distribution and formation of o-Ps,
        convolved with detector function -- see below.

        return:
            f(t)

        defaults:
            amp = 1.0                 # scaling factor
            sigma = 2 ns              # Gaussian width
            eff = 0.3                 # o-Ps re-emission efficiency
            tau = 142.0461 ns         # o-Ps lifetime
            kappa = 10 ns             # detector decay time

        kwargs:
            norm = True               # normalise to max value

    """
    norm = kwargs.get("norm", True)
    # sim.
    yvals = np.exp(-arr * (1.0 / tau + 1.0 / kappa)) * (
        eff
        * np.exp((sigma ** 2.0 / (2.0 * tau ** 2.0)) + arr / kappa)
        * (1.0 + erf((arr * tau - sigma ** 2.0) / (np.sqrt(2.0) * sigma * tau)))
        - (1 + tau * (eff - 1) / kappa)
        * np.exp((sigma ** 2.0 / (2.0 * kappa ** 2.0)) + arr / tau)
        * (1.0 + erf((arr * kappa - sigma ** 2.0) / (np.sqrt(2.0) * sigma * kappa)))
    )
    if norm:
        # normalise to peak value
        yvals = yvals / max(yvals)
    return amp * yvals
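
# A minimal usage sketch (an addition, not from the original project): it
# assumes sim() above is in scope and evaluates the spectrum on a time grid.
import numpy as np
from scipy.special import erf  # used inside sim()

t = np.arange(-50e-9, 600e-9, 0.5e-9)   # seconds
f = sim(t)                               # norm=True by default
assert np.isclose(f.max(), 1.0)          # normalised to the peak value
f_raw = sim(t, amp=2.0, norm=False)      # unnormalised, scaled by amp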
Example no. 3
 def init2(self,sigma=0.05):
     """
     Initialize odes
     """
     NN = self.N // 2 + 1
     self.xi[0, :] = 1 - 0.5 * erf((self.zz + 0.5) / sigma) + 0.5 * erf((self.zz - 0.5) / sigma)
     self.xi[1, :] = self.xi[0, :]**1.01
Example no. 4
def lognormal_cdf( x, mu, sigma ):
  if sigma > 0:
    small = 0.5 + 0.5*special.erf( (np.log(1e-6)-mu)/(np.sqrt(2.0)*sigma))
    if isinstance(x, np.ndarray):
      lp = np.zeros( len(x) )
      I = np.flatnonzero( x > 1e-6 )
      J = np.flatnonzero( x <= 1e-6 )
      lp[I] = 0.5 + 0.5*special.erf( (np.log(x[I])-mu)/(np.sqrt(2.0)*sigma))
      lp[J] = small
      return lp
    else:
      if x > 1e-6:
        return 0.5 + 0.5*special.erf( (np.log(x)-mu)/(np.sqrt(2.0)*sigma))
      else:
        return small
  else:
    if isinstance(x, np.ndarray):
      logx = np.log(x+1e-6)
      lp = 0.5*np.ones( len(x))
      I1 = np.flatnonzero( logx < mu )
      I2 = np.flatnonzero( logx > mu )
      
      lp[I1] = 0
      lp[I2] = 1
      return lp
    else:
      if np.log(x) < mu:
        return 0
      elif np.log(x) > mu:
        return 1
      else:
        return 0.5
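
# A quick sanity check (an addition): at the median x = exp(mu) the scalar
# branch should return exactly 0.5. Assumes lognormal_cdf and scipy are in scope.
import numpy as np
from scipy import special

mu, sigma = 0.3, 0.8
assert abs(lognormal_cdf(np.exp(mu), mu, sigma) - 0.5) < 1e-12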
Example no. 5
def _gausshermitebin(x,params,binsize):
    """Evaluate the integrated Gauss-Hermite function"""
    ncenter= params.shape[1]
    out= numpy.empty((ncenter,x.shape[1]))
    integ= numpy.empty((params.shape[0]-1,x.shape[1]))
    for ii in range(ncenter):
        poly= numpy.polynomial.HermiteE(params[1:,ii])
        # Convert to regular polynomial basis for easy integration
        poly= poly.convert(kind=numpy.polynomial.Polynomial)
        # Integrate and add up
        w1= (x[ii]-0.5*binsize)/params[0,ii]
        w2= (x[ii]+0.5*binsize)/params[0,ii]
        eexp1= numpy.exp(-0.5*w1**2.)
        eexp2= numpy.exp(-0.5*w2**2.)
        integ[0]= numpy.sqrt(numpy.pi/2.)\
            *(special.erf(w2/_SQRTTWO)-special.erf(w1/_SQRTTWO))
        out[ii]= poly.coef[0]*integ[0]
        if params.shape[0] > 1:
            integ[1]= -eexp2+eexp1
            out[ii]+= poly.coef[1]*integ[1]
        for jj in range(2,params.shape[0]-1):
            integ[jj]= (-w2**(jj-1)*eexp2+w1**(jj-1)*eexp1)\
                +(jj-1)*integ[jj-2]
            out[ii]+= poly.coef[jj]*integ[jj]
    return out
Example no. 6
    def get_gaussian_kernel(self, cmtlong, cmtlat, bandwidth, celllong,
                            celllat, spcx, spcy=None):
        """
        Return the contribution of moment tensor [cmtlong, cmtlat] to cell
        specified by midpoint [midcell] with widths [x, y]. Gaussian kernel
        definition from Zechar et al (2010).
        """
        # Get the length scales (in degrees)
        xls, yls = _get_length_scales(cmtlong, cmtlat, bandwidth)
        # Get the distances (in degrees)
        dx1, dx2, dy1, dy2 = _get_distances(cmtlong, cmtlat, celllong, celllat,
                                            spcx, spcy)

        # Find earthquakes in distance
        valid_long = np.logical_and(dx1 <= 5.92 * xls, dx2 >= -5.92 * xls)
        valid_lat = np.logical_and(dy1 <= 5.92 * yls, dy2 >= -5.92 * yls)
        select = np.where(np.logical_and(valid_long, valid_lat))[0]
        kernel = 0.25 * (erf(dx2[select] / xls) - erf(dx1[select] / xls)) *\
            (erf(dy2[select] / yls) - erf(dy1[select] / yls))
        
        if np.any(kernel < 0.):
            print(cmtlong, cmtlat, celllong[select], celllat[select],
                  np.column_stack([dx1[select], dx2[select], dy1[select],
                                   dy2[select]]), kernel)
            plt.plot(cmtlong, cmtlat, 's')
            plt.plot(celllong[select], celllat[select], '.')
            raise ValueError('Gaussian kernel returned negative values')
        return kernel, select
Example no. 7
def truncated_normal(shape=None, mu=0., sigma=1., x_min=None, x_max=None):
    """
    Generates random variates from a lower- and upper-bounded normal distribution

    @param shape: shape of the random sample
    @param mu:    location parameter 
    @param sigma: width of the distribution (sigma >= 0.)
    @param x_min: lower bound of variate
    @param x_max: upper bound of variate    
    @return: random variates of the bounded normal distribution
    """
    from scipy.special import erf, erfinv
    from numpy.random import standard_normal
    from numpy import inf, sqrt

    if x_min is None and x_max is None:
        return standard_normal(shape) * sigma + mu
    elif x_min is None:
        x_min = -inf
    elif x_max is None:
        x_max = inf
        
    x_min = max(-1e300, x_min)
    x_max = min(+1e300, x_max)
    var = sigma ** 2 + 1e-300
    sigma = sqrt(2 * var)
    
    a = erf((x_min - mu) / sigma)
    b = erf((x_max - mu) / sigma)

    return probability_transform(shape, erfinv, a, b) * sigma + mu
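
# probability_transform is not shown above; a minimal sketch of what it
# presumably does (inverse-CDF sampling of a uniform draw on (a, b)) -- an
# assumption, not the original helper:
def probability_transform(shape, inv_cdf, a, b):
    from numpy.random import uniform
    return inv_cdf(uniform(a, b, shape))  # maps U(a, b) through erfinv

samples = truncated_normal(shape=10000, mu=1.0, sigma=0.5, x_min=0.0, x_max=2.0)
assert samples.min() >= 0.0 and samples.max() <= 2.0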
Example no. 8
    def TAnalytic(self):
        """
            z     = vector of z coordinate postions such that 0 is surface and bed is negative thickness
            w     = vertical velocity in meters per second. also a vector of the same size as z
            H     = ice thickness
            ubar  = vertical average of the horizontal speed in meters per second.
            alpha = surface slope as a ratio of rise to run.
            lamb  = elevational lapse rate
            Qgeo  = geothermal heat flow
            Ts    = Surface temperature (mean annual)
            OUTPUT:
            T     = a vector of temperatures (almost)
        """
        z = self.Z
        w = self.w_z / spy
        H = self.Z[0] - self.Z[-1]
        ubar  = (self.u_b + 0.8 * (self.u_s - self.u_b)) / spy #np.average([self.u_s, self.u_b]) / spy
        alpha = deg2ratio(self.pz_spx)
        lamb  = self.lmbda
        Qgeo  = self.Q_geo
        Ts    = self.theta_s 
        k    = self.k    # Thermal conductivity of ice W/m/K
        rhoi = self.lrho # Density of ice kg/m^3 
        cp   = self.cp   # Heat Capacity of ice J/K/kg
        w    = w * spy
        ubar = ubar * spy 
        xphi = np.sqrt(np.abs(w[0] - w[-1]) / (2 * H * (k/(rhoi * cp)*spy)))
        coef = 2. * ubar * alpha * lamb * H / (w[0] - w[-1])
        
        T = Ts - Qgeo / (k * xphi) * np.sqrt(np.pi)/2. * \
            (ss.erf(xphi*H) - ss.erf(xphi * (-z))) \
            + coef * (ss.dawsn(xphi * H) - ss.dawsn(xphi * (-z)))

        return [T[-1::-1]]  # Be careful with this, your coordinate system may be different.
Example no. 9
def check_solve_s2(rtf, sigma=1.0, epsy=1e-10, epsd=1e-10):
    r = rtf.get_radii()
    z = np.zeros(len(r))

    b = CubicSpline(2/r, -2/r**2, rtf)
    a = CubicSpline(z, z, rtf)

    rhoy = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5*(-r/sigma)/sigma
    fy = -4*np.pi*rhoy
    fd = -4*np.pi*rhod
    f = CubicSpline(fy, fd, rtf)

    bcs = (None, 0.0, 1.0/r[-1], None)

    u = solve_ode2(b, a, f, bcs)


    s2s = np.sqrt(2)*sigma
    soly = erf(r/s2s)/r
    sold = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2
    if False:
        print(abs(u.y - soly).max()/abs(soly).max())
        print(abs(u.dx - sold).max()/abs(sold).max())
        import matplotlib.pyplot as pt
        pt.clf()
        pt.plot(r, u.y, label='numerical')
        pt.plot(r, soly, label='exact')
        pt.legend(loc=0)
        pt.savefig('s2.png')
    assert abs(u.y - soly).max()/abs(soly).max() < epsy
    assert abs(u.dx - sold).max()/abs(sold).max() < epsd
Example no. 10
 def log_volume(self, box):
     bottomcorner, topcorner = box        
     assert len(bottomcorner) == len(topcorner)
     assert len(bottomcorner) == self.dim
     top = 0.5*(1 + erf((topcorner-self.mu)/(np.sqrt(2)*self.sigma)))
     bottom = 0.5*(1 + erf((bottomcorner-self.mu)/(np.sqrt(2)*self.sigma)))   
     return np.log(top - bottom).sum()
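
# Standalone check of the same quantity (an addition): log-volume of a box
# under an axis-aligned Gaussian, replicating log_volume without the class.
import numpy as np
from scipy.special import erf

mu, sigma = np.array([0.0, 1.0]), np.array([1.0, 2.0])
bottom, top = np.array([-1.0, 0.0]), np.array([1.0, 2.0])
cdf = lambda x: 0.5 * (1 + erf((x - mu) / (np.sqrt(2) * sigma)))
log_vol = np.log(cdf(top) - cdf(bottom)).sum()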
Example no. 11
def discretizedGaussian(amp, mu, cov, grid):
    '''Convenience method for discretized Gaussian evaluation'''
    #eigenvalue decomposition of precision matrix
    P = la.inv(cov)     #precision matrix
    evl, M = la.eig(P)
    
    #assert np.allclose(np.diag(evl), iM.dot(cov).dot(M))
    #check if covariance is positive definite
    if np.any(evl < 0):
        raise ValueError('Covariance matrix should be positive definite')
    
    
    #make column vector for arithmetic
    mu = np.array(mu, ndmin=grid.ndim, dtype=float).T
    evl = np.array(evl, ndmin=grid.ndim, dtype=float).T

    xm = grid - mu #(2,...) shape
    
    #return M, xm
    
    f = np.sqrt(2 / evl)
    pf = np.sqrt(np.pi / evl)
    td0 = np.tensordot(M, xm + 0.5, 1)
    td1 = np.tensordot(M, xm - 0.5, 1)
    w = pf*(erf(f*td0) - erf(f*td1))
    
    return amp * np.prod(w, axis=0)
Example no. 12
 def quant(self,q,jk,sigma=True):
     """
     NAME:
        quant
     PURPOSE:
        return the quantile of the M_x distribution at this J-K
     INPUT:
        q - desired quantile in terms of 'sigma'
        jk - J-Ks
        sigma= if False, the quantile is the actual quantile
     OUTPUT:
        quantile
     HISTORY:
        2012-11-09 - Written - Bovy (IAS)
     """
     #First calculate the inverse cumulative distribution
     interpInvCumul= self.calc_invcumul(jk)
     if not sigma:
         return interpInvCumul(q)
     else:
         if q > 0.:
             arg= 1.-(1.-special.erf(q/numpy.sqrt(2.)))/2.
         else:
             arg= (1.-special.erf(-q/numpy.sqrt(2.)))/2.
         return interpInvCumul(arg)
Example no. 13
def SLD_calculations(z, sample, inst):
    ''' Calculates the scattering length density at the positions z
    '''
    parameters = sample.resolveLayerParameters()
    dens = array(parameters['dens'], dtype = complex64)
    mag_dens = array(parameters['mag_dens'], dtype = complex64)
    fc = array(parameters['fc'], dtype = complex64)
    sldc = dens*fc
    d_sldc = sldc[:-1] - sldc[1:]
    fm1 = array(parameters['fm1'], dtype = complex64)
    fm2 = array(parameters['fm2'], dtype = complex64)
    sldm1 = mag_dens*fm1
    sldm2 = mag_dens*fm2
    d_sldm1 = sldm1[:-1] - sldm1[1:]
    d_sldm2 = sldm2[:-1] - sldm2[1:]
    d = array(parameters['d'], dtype = float64)
    d = d[1:-1]
    # Include one extra element - the zero pos (substrate/film interface)
    int_pos = cumsum(r_[0,d])
    sigma = int_pos*0.0+1e-7
    if z is None:
        z = arange(min(-sigma[0]*5, -5), max(int_pos.max()+sigma[-1]*5, 5), 0.5)
    rho_c = sum(d_sldc*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sldc[-1]
    rho_m1 = sum(d_sldm1*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sldm1[-1]
    rho_m2 = sum(d_sldm2*(0.5 - 0.5*erf((z[:,newaxis]-int_pos)/sqrt(2.)/sigma)), 1) + sldm2[-1]
    
    return {'real charge sld': real(rho_c), 'imag charge sld': imag(rho_c),
            'real mag_1 sld': real(rho_m1), 'imag mag_1 sld': imag(rho_m1),
            'real mag_2 sld': real(rho_m2), 'imag mag_2 sld': imag(rho_m2),
            'z':z}
Example no. 14
def eta0Maxwellian(vmin, vobs, v0bar, vesc):
    """ Velocity integral eta0 in a Standard Halo Model with Maxwellian velocity
    distribution.
    Input:
        vmin: float
            Minimum DM velocity.
        vobs: float
            Observed velocity.
        v0bar: float
            Velocity dispersion.
        vesc: float
            Escape velocity.
    Returns:
        eta0: float
    """
    x = vmin/v0bar
    y = vobs/v0bar
    z = vesc/v0bar
    erfz = erf(z)
    sqrt_pi = np.sqrt(pi)
    exp_z_sq = np.exp(-z**2)
    exp_z_sq_z = np.exp(-z**2) * z
    eta = np.array(list(map(lambda i: -2. * exp_z_sq / sqrt_pi - erf(i-y) / (2.*y) +
                            erf(i+y) / (2.*y)
                            if i + y <= z
                            else exp_z_sq * (i - y - z) / (sqrt_pi * y) - erf(i-y) / (2.*y) +
                            erfz / (2.*y)
                            if i - y <= z < i + y
                            else 0, x)))
    return eta / (-2. * exp_z_sq_z / sqrt_pi + erfz) / v0bar
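
# An illustrative call (an addition): Standard-Halo-Model-like numbers in
# km/s, chosen purely for demonstration. Assumes numpy's pi and scipy's erf
# are in scope, as the function requires.
import numpy as np
from numpy import pi
from scipy.special import erf

vmin = np.linspace(0., 800., 9)
eta0 = eta0Maxwellian(vmin, vobs=232., v0bar=220., vesc=544.)
# eta0 >= 0 everywhere and eta0 == 0 once vmin > vesc + vobs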
Example no. 15
def eta1Maxwellian(vmin, vobs, v0bar, vesc):
    """ Same as eta0Maxwellian, but this is the modulation velocity integral
    eta1 = d eta0 / d vobs * delta_v.
        delta_v = v_Earth * cos(gamma) where the velocity of the Earth is
    v_Earth = 30 km/s and is inclined at an angle of gamma = 60 deg wrt the
    galactic plane.
    Returns:
        eta1: float
    """
    x = vmin/v0bar
    y = vobs/v0bar
    z = vesc/v0bar
    delta_v = 15.  # 30 * cos(60 * pi / 180)
    erfz = erf(z)
    sqrt_pi = np.sqrt(pi)
    exp_z_sq = np.exp(-z**2)
    exp_z_sq_z = np.exp(-z**2) * z
    erf_z = erf(z)
    eta = np.array(list(map(lambda i: (np.exp(-(i+y)**2) + np.exp(-(i-y)**2)) / (sqrt_pi * y) +
                            (erf(i-y) - erf(i+y)) / (2 * y**2)
                            if i + y <= z
                            else exp_z_sq * (-i + z) / (sqrt_pi * y**2) +
                            np.exp(-(i-y)**2) / (sqrt_pi * y) + (erf(i-y) - erf_z) / (2 * y**2)
                            if i - y <= z < i + y
                            else 0, x)))
    return eta / (-2. * exp_z_sq_z / sqrt_pi + erfz) * delta_v / v0bar**2
Example no. 16
def int_erf(a, r):
    '''Returns the integral of an error function with integration extremes (r+a), (r-a)'''
    if r=='+inf': return 4. * a
    S,D = r+a, r-a
    erf_part = S*erf(S) - D*erf(D)
    exp_part = 1./(math.sqrt(math.pi)) * ( math.exp(-S**2) - math.exp(-D**2) )
    return 2. * ( erf_part + exp_part )
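
# A quadrature cross-check (an addition): the value returned is twice the
# plain integral of erf over (r-a, r+a), which also explains the 4*a limit
# as r -> infinity. Assumes scipy is available.
from scipy.special import erf
from scipy.integrate import quad

a, r = 0.7, 1.3
numeric, _ = quad(erf, r - a, r + a)
assert abs(int_erf(a, r) - 2.0 * numeric) < 1e-10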
Example no. 17
def _lambda(lambda_value, z, R, mag, mlim, Cfilter, Lfilter, bx, mag_err=0,
            sigma_R=0.05, Ro=0.9, Rs=0.15, Rcore=0.1, beta=0.2, maxiter=1000):
    """
    bx is background per sq. deg.
    """
    Rc = Ro * (lambda_value/100.)**beta
    j = (R < Rc)
    # the radius in radians
    d = numpy.pi/180 * cosmology.dProj(z, Rc, input_unit='Mpc', unit='deg')
    # the prefactor (180/pi)**2 is to convert to sq. deg.
    area = (180/numpy.pi)**2 * 2*numpy.pi * (1 - numpy.cos(d/2.))
    Rfilter = filter_radius(R, Rc=Rc, Rs=Rs, Rcore=Rcore)
    ux = 2*numpy.pi*R*Rfilter * Cfilter * Lfilter
    ux[ux == 0] = 1e-100
    bg = 2 * numpy.pi * R * bx * area
    px = numpy.zeros(ux.shape)
    # smooth edges (Rozo et al. 2015, Appendix B)
    thetaL = 0.5 * (1 + erf((mlim - mag[j])/mag_err[j]))
    thetaR = 0.5 * (1 + erf((Rc - R[j])/sigma_R))
    #thetaL = thetaR = 1
    eq = lambda n: n - (n*ux[j] * thetaL * thetaR / (n*ux[j] + bg[j])).sum()
    try:
        richness = optimize.newton(eq, lambda_value, maxiter=maxiter)
    except RuntimeError:
        # return early so the calling loop can continue past convergence failures
        print('Hit RuntimeError with maxiter=%d' % maxiter)
        return lambda_value, px, Rfilter, Rc
    #print 'z=%.2f Rc=%.2f area=%.4f No=%.1f N=%.1f' \
          #%(z, Rc, area, lambda_value, richness)
    px[j] = richness * ux[j] / (richness * ux[j] + bg[j])
    return richness, px, Rfilter, Rc
Example no. 18
def test_solve_poisson_becke_gaussian_dipole():
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 8e1, 200)
    r = rtf.get_radii()
    # By differentiating a Gaussian charge distribution with respect to z, we get
    # rho(\mathbf{r})=Y_1^0(\Omega) rhoy, with rhoy as given below.
    # Note that rhoy is simply the derivative of a Gaussian charge distribution
    # with respect to r.
    rhoy = -r/sigma**2*np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = (-1.0+r**2/sigma**2)/sigma**2*np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rho = CubicSpline(rhoy, rhod, rtf)
    v = solve_poisson_becke([rho]*4)[1] #Not interested in first spline

    s2s = np.sqrt(2)*sigma
    # The potential corresponding to Y_1^0(\Omega) can be found by differentiating
    # the potential of a Gaussian charge distribution with respect to r
    soly = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2
    sold = 2.0*erf(r/s2s)/r**3 - 2*2/np.sqrt(np.pi)*np.exp(-(r/s2s)**2)/s2s/r**2 - 2*2/np.sqrt(np.pi)/s2s**3*np.exp(-(r/s2s)**2)

    if False:
        import matplotlib.pyplot as pt
        n = 200
        pt.clf()
        pt.plot(r[:n], -soly[:n], label='exact',marker='*')
        pt.plot(r[:n], -v.y[:n], label='spline',marker='*')
        pt.xscale('log')
        pt.yscale('log')
        pt.legend(loc=0)
        pt.savefig('poisson_gdipole.png')

    assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
Example no. 19
 def describeDisc(self,Hfact=.002597943,T0=16.7,plIndex=-0.5,TIndex=0.,Rin=5.,Rout=100.,Mstar=1.,Mdisc=.1,G=887.2057):
   if not self.N:
     return
   self.Hfact=Hfact
   self.T0=T0
   self.plIndex=plIndex
   self.TIndex=TIndex
   self.Rin=Rin
   self.Rout=Rout
   self.Mstar=Mstar
   self.Mdisc=Mdisc
   self.G=G
   #All these properties only apply to the gas particles..
   self.subsetTmp([(0,None)])
   #We can infer some stuff if we have data...
   self.Mdisc=np.sum(self.m_)
   self.Rin=self.R_.min()
   self.Rout=self.R_.max()
   if plIndex==-2:
     self.D=np.log2(self.Rout/self.Rin)
   else:
     self.D=(1./(self.plIndex+2.))*(self.Rout**(self.plIndex+2)-self.Rin**(self.plIndex+2))
   self.AscaleH_=self.Hfact*np.sqrt(self.T0/(self.Mstar*(self.Rin**self.TIndex)))*self.R_**((self.TIndex+3.)/2.)
   self.Arho_=(self.Mdisc/(2.*np.pi*self.D))*(self.R_**self.plIndex)*(np.exp((-1.*self.z_**2)/(2.*self.AscaleH_**2))/np.sqrt(2.*np.pi*self.AscaleH_**2))
   self.AcdSur_=((self.Mdisc*self.R_**self.plIndex)/(4.*np.pi*self.D))*(1-special.erf(np.abs(self.z_)/np.sqrt(2.*self.AscaleH_**2)))
   self.AcdMid_=(self.R_**self.plIndex)*(self.Mdisc/(4.*np.pi*self.D))*(special.erf(np.abs(self.z_)/np.sqrt(2.*self.AscaleH_**2)))
   self.AcdTot_=(self.Mdisc/(4.*np.pi*self.D))*self.R_**self.plIndex
   self.Agzstar_=-1.*(self.G*self.Mstar*self.z_)/(self.r_**3)
   self.Agzapprox_=-4.*np.pi*self.G*self.AcdMid_*np.sign(self.z_)
   self.gznostar_=self.gz_-self.Agzstar_
Example no. 20
 def linear_term(t, xy):
     x,y = xy
     plt.scatter(_x[0], _x[1], s=100)
     sigma = scene.sigma_v
     w, h = scene.width/2.0, scene.height/2.0
     wx = scene.bbox_width/2.0
     wv = scene.bbox_velocity_width/2.0
     def np_max(*args):
         comp = lambda x,y: x*(x>y)+y*(y>x)
         return reduce(comp, args[1:], args[0])
     def np_min(*args):
         comp = lambda x,y: x*(x<y)+y*(y<x)
         return reduce(comp, args[1:], args[0])
     u_min = np_max((x-w)/t, (x-_x[0]-wx)/t, _v[0] - wv)
     v_min = np_max((y-h)/t, (y-_x[1]-wx)/t, _v[1] - wv)
     u_max = np_max(u_min, np_min((x+w)/t, (x-_x[0]+wx)/t, _v[0] + wv))
     v_max = np_max(v_min, np_min((y+h)/t, (y-_x[1]+wx)/t, _v[1] + wv))
     Prob_of_x_hat_and_v_hat = _normalizing_constant(_x, _v)
     scale = scene.P_of_c[-1] / Prob_of_x_hat_and_v_hat
     scale /= h * w * wx**2 * wv**2
     from scipy.special import erf
     rt2 = np.sqrt(2)
     out = erf( u_max/(sigma*rt2) ) - erf( u_min/(sigma*rt2) )
     out *= erf( v_max/(sigma*rt2) ) - erf( v_min/(sigma*rt2) )
     out *= 0.25 * scale
     return out
Example no. 21
def pulse_erf(t, pars):
#def pulse_erf(t, (cmax, cb, centre, width, rise, decay)):
    """
    Generate realistic concentration pulse with rise and fall from error function.

    Parameters
    ----------
    t : ndarray or float
        Time samples.
    cmax : float
        Peak concentration.
    cb : float
        background concentration.
    centre : float
        Time of the pulse centre.
    width : float
        Pulse half width.
    rise : float
        Rise time constant for error function.
    decay : float
        Decay time constant for error function.

    Returns
    -------
    c : ndarray
        Concentration profile.
    """

    cmax, cb, centre, width, rise, decay = pars
    conc = (cmax * 0.5 *
        (erf((t - centre + width / 2.) / rise) -
        erf((t - centre - width / 2.) / decay)))
    return conc + cb
Example no. 22
def dttnorm(x0, min=0.0, max=1.0, mu=0.0, sd=1.0, **kwargs):
    """Truncated normal distribution."""
    c = 1.0/sd
    x = c * (x0 - mu)
    a = c * (min - mu) / sqrt2
    b = c * (max - mu) / sqrt2
    return c * sqrt2sqrtpi * exp(-0.5 * x**2) / ( erf(b) - erf(a) ) * (x0 > min) * (x0 < max)
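
# A normalisation check (an addition). The names sqrt2 and sqrt2sqrtpi are
# module-level constants assumed by dttnorm; the values below are their
# presumed meanings (sqrt(2) and sqrt(2)/sqrt(pi)).
from numpy import sqrt, exp, pi
from scipy.special import erf
from scipy.integrate import quad

sqrt2 = sqrt(2.0)
sqrt2sqrtpi = sqrt(2.0) / sqrt(pi)

total, _ = quad(lambda x0: dttnorm(x0, min=0.0, max=1.0, mu=0.3, sd=0.4), 0.0, 1.0)
assert abs(total - 1.0) < 1e-8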
Example no. 23
def omega(rho,
          rho_dark_energy,transition_width_de,
          rho_matter,transition_width_mr):
    return OMEGA_MIN \
        + DE_AMPLITUDE + MATTER_AMPLITUDE\
        + DE_AMPLITUDE*erf((rho - rho_dark_energy)/transition_width_de)\
        + MATTER_AMPLITUDE*erf((rho - rho_matter)/transition_width_mr)
Example no. 24
def alpha2(a, N, Nmax, Nmin):
    y = sqrt(pi*Nmin*Nmax)/(2.0*a) * exp((a * log2(sqrt(Nmax/Nmin)))**2.0)
    y = y * exp((log(2.0)/(2.0*a))**2.0)
    y = y * erf(a * log2(sqrt(Nmax/Nmin)) - log(2.0)/(2.0*a)) + erf(a * log2(sqrt(Nmax/Nmin)) + log(2.0)/(2.0*a))
    y -= N

    return y # find alpha
Example no. 25
def response_curve_fit(stimArray, responseArray, type='gaussian'):
    #find best fit for frequency or bandwidth (or others) spike data
    from scipy.optimize import curve_fit
    try:
        if type=='gaussian':
            maxInd = np.argmax(responseArray)
            p0 = [stimArray[maxInd], responseArray[maxInd], 1.,0.]
            curveFit = curve_fit(gaussian, stimArray, responseArray, p0=p0, maxfev=10000)[0]
        elif type=='carandini':
            from scipy.special import erf
            maxInd = np.argmax(responseArray)
            initmExp = 2.5
            initSigmaD = stimArray[maxInd]
            initSigmaS = 2*stimArray[maxInd]
            initRD = responseArray[maxInd]/(erf(stimArray[maxInd]/(np.sqrt(2)*initSigmaD)))**initmExp
            initRS = (-1 + initRD/responseArray[maxInd])/(erf(stimArray[maxInd]/(np.sqrt(2)*initSigmaS)))**initmExp
            p0 = [initRD, initRS, initSigmaD, initSigmaS, initmExp]
            curveFit = curve_fit(carandini_form, stimArray, responseArray, p0=p0, maxfev=10000)[0]
    except RuntimeError:
        print "Could not fit {} curve to tuning data.".format(type)
        return None, None
    
    #calculate R^2 value for fit
    if type=='gaussian':
        fitResponseArray = gaussian(stimArray, curveFit[0], curveFit[1], curveFit[2], curveFit[3])
    elif type=='carandini':
        fitResponseArray = carandini_form(stimArray, curveFit[0], curveFit[1], curveFit[2], curveFit[3], curveFit[4])
    residuals = responseArray - fitResponseArray
    SSresidual = np.sum(residuals**2)
    SStotal = np.sum((responseArray-np.mean(responseArray))**2)
    Rsquared = 1-(SSresidual/SStotal)
    
    return curveFit, Rsquared
Example no. 26
    def eval(self, values, x):

        u0 = self.lam**(7.0/3.0)/((1 + self.lam**4)**(2.0/3.0))*(self.Ra/sqrt(pi)/2.0)**(2.0/3.0)

        Q = 2*sqrt(self.lam/pi/u0)
        v0 = u0
        if abs(x[0]) < DOLFIN_EPS and abs(x[1] - 1.0) > DOLFIN_EPS:
            Tu = 0.5
        elif abs(x[0]) < DOLFIN_EPS and abs(x[1] - 1.0) <= DOLFIN_EPS:
            Tu = 0.0
        else:
            Tu = 0.5*erf( (1.0-x[1])/2.0*sqrt(u0/(x[0])) )
        if abs(x[0] - 2.0) < DOLFIN_EPS:
            Tl = 0.5
        else:
            Tl = 1.0 - 0.5*erf( x[1]/2.0*sqrt(u0/(self.lam - x[0])) )

        Tr = 0.5 + Q/2.0/sqrt(pi)*sqrt(v0/(x[1] + 1.0))*exp(-x[0]*x[0]*v0/(4*x[1] + 4))
        td = (self.lam - x[0])*(self.lam - x[0])*v0
        Ts = 0.5 - Q/2.0/sqrt(pi)*sqrt(v0/(2.0 - x[1]))*exp( -td/(8.0 - 4.0*x[1]) )
        values[0] = Tr + Tu + Tl +  Ts - 1.5
        if values[0] < 0.0:
            values[0] = 0.0
        if values[0] > 1.0:
            values[0] = 1.0
Example no. 27
    def beam_flux(x):
        """Beam flux depending on BEAM_TYPE, data from database/.cfg file"""

        if ((par.BEAM_TYPE != 'constant' and (par.FWHM == 0 or par.SCAN_WIDTH == 0)) or
                (par.BEAM_TYPE == 'error function' and par.ERF_BEAM_WIDTH == 0)):
            print('Check parameters! (Division by zero error)')
            return 0
        sigma = par.FWHM/np.sqrt(8*np.log(2))
        Fbeam = 0
        if par.BEAM_TYPE == 'constant':
            Fbeam = par.BEAM_CURRENT_DENSITY / 1.6022e-19
        elif par.BEAM_TYPE == 'Gaussian':
            Fbeam = (par.BEAM_CURRENT/(1.6022e-19*np.sqrt(2*np.pi)*sigma*par.SCAN_WIDTH)) * \
                np.exp(-((x-par.BEAM_CENTER)**2)/(2*(sigma**2)))
        elif par.BEAM_TYPE == 'error function':
            x1 = par.BEAM_CENTER - par.ERF_BEAM_WIDTH/2
            x2 = par.BEAM_CENTER + par.ERF_BEAM_WIDTH/2
            Fbeam = (par.BEAM_CURRENT/(1.6022e-19*2*par.SCAN_WIDTH*par.ERF_BEAM_WIDTH)) * \
                (erf(-(x-x2)/(sigma*np.sqrt(2)))-erf(-(x-x1)/(sigma*np.sqrt(2))))
        return Fbeam
Example no. 28
def integrand(mu, Phi, B, sigma):
    cte = np.sqrt(np.pi/2.0) * sigma
    if mu == 0:
        out = np.exp(-0.5*Phi**2/sigma**2) / cte
    else:
        out = (sp.erf(Phi/(np.sqrt(2.0)*sigma)) + sp.erf((B*mu-Phi) / (np.sqrt(2.0)*sigma))) / (B*mu)
    return out
Example no. 29
def robin_temperature(height, thickness, surfacetemp, heatflux, accumulation):
    """Compute the steady-state temperature according to Robin (1955)
    """
    q = np.sqrt(accumulation / (2 * thermal_diffusivity * thickness))
    T = surfacetemp - (heatflux * np.sqrt(np.pi) / (2 * thermal_conductivity * q)
            * (erf(height * q) - erf(thickness * q)))
    return T
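
# An illustrative call (an addition). thermal_diffusivity and
# thermal_conductivity are module-level constants assumed by the function;
# the values below are typical for ice and purely illustrative
# (units: m^2/yr and W/(m K), with accumulation in m/yr).
import numpy as np
from scipy.special import erf

thermal_diffusivity = 34.4     # ~1.09e-6 m^2/s expressed per year
thermal_conductivity = 2.1

height = np.linspace(0.0, 3000.0, 50)          # metres above the bed
T = robin_temperature(height, thickness=3000.0, surfacetemp=-30.0,
                      heatflux=0.05, accumulation=0.1)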
Example no. 30
def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
              center2=1.0, sigma2=1.0, form='linear'):
    """rectangle function: step up, step down  (see step function)
    starts at 0.0, rises to amplitude (at center1 with width sigma1)
    then drops to 0.0 (at center2 with width sigma2) with form:
      'linear' (default) = ramp_up + ramp_down
      'atan', 'arctan'   = amplitude*(atan(arg1) + atan(arg2))/pi
      'erf'              = amplitude*(erf(arg1) + erf(arg2))/2.
      'logistic'         = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))]

    where arg1 =  (x - center1)/sigma1
    and   arg2 = -(x - center2)/sigma2
    """
    if abs(sigma1) <  1.e-13:
        sigma1 = 1.e-13
    if abs(sigma2) <  1.e-13:
        sigma2 = 1.e-13

    arg1 = (x - center1)/sigma1
    arg2 = (center2 - x)/sigma2
    if form == 'erf':
        out = 0.5*(erf(arg1) + erf(arg2))
    elif form.startswith('logi'):
        out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
    elif form in ('atan', 'arctan'):
        out = (arctan(arg1) + arctan(arg2))/pi
    else:
        arg1[where(arg1 <  0)]  = 0.0
        arg1[where(arg1 >  1)]  = 1.0
        arg2[where(arg2 >  0)]  = 0.0
        arg2[where(arg2 < -1)] = -1.0
        out = arg1 + arg2
    return amplitude*out
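
# A short usage sketch (an addition): a smooth box between the two centres,
# using the 'erf' form. The function body relies on module-level imports of
# erf, exp, arctan, pi and where; provided here for self-containment.
import numpy as np
from numpy import exp, where, arctan, pi
from scipy.special import erf

x = np.linspace(-2.0, 4.0, 200)
box = rectangle(x, amplitude=2.0, center1=0.0, sigma1=0.2,
                center2=2.0, sigma2=0.2, form='erf')
# box rises near x=0, plateaus near 2.0, and falls near x=2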
Example no. 31
File: simlib.py Project: pelegs/msm
 def integral_1D(self, d, a, b):
     return self.A[d] * self.S[d] * (erf(
         (b - self.M[d]) / (sqrt2 * self.S[d])) - erf(
             (a - self.M[d]) / (sqrt2 * self.S[d])))
Example no. 32
def f_erf(r, Xi, T):
    return (0.5) * (erf((r - alpha(T)) / Xi) - 1)
Example no. 33
def B2_erf_analytical(Xi, T):
    return np.pi / 3 * (
        (alpha(T)**3 + 1.5 * alpha(T) * Xi**2) *
        (1 + erf(alpha(T) / Xi)) + 1 / np.sqrt(np.pi) *
        (alpha(T)**2 * Xi + Xi**3) * np.exp(-(alpha(T) / Xi)**2))
Example no. 34
#Potential Plots
#r=np.linspace(1, 1.12, 2000) #for erf plots #original SAVE
r = np.linspace(.5, 1.1225, 2000)  #2R=1.1225
#r=np.linspace(.5, 3, 2000)

#Plot WCA
sigma_over_r_to_pow6 = (sigma / r) * (sigma / r) * (sigma / r) * (
    sigma / r) * (sigma / r) * (sigma / r)
V = 4 * epsilon * (sigma_over_r_to_pow6 * sigma_over_r_to_pow6 -
                   sigma_over_r_to_pow6) + epsilon
#plt.plot(r/sigma,V/epsilon, label='Vwca', color='black')
plt.plot(r / sigma, V / KbT, label='Vwca V/T', color='black')

#Plot erf with Xi from B2
Verf = -KbT * np.log(.5 * (special.erf((r - alpha) / zeta_B2) + 1))
#plt.plot(r/sigma,Verf/epsilon, label='B2', color='blue')
plt.plot(r / sigma, Verf / KbT, label='B2 V/T', color='blue')

#Plot erf with Xi_ln
Verf = -KbT * np.log(.5 * (special.erf((r - alpha) / zeta_ln) + 1))
#plt.plot(r/sigma,Verf/epsilon, label='Xi ln', color='red')
plt.plot(r / sigma, Verf / KbT, label='Xi ln V/T', color='red')

#Plot erf with Xi_ln10
Verf = -KbT * np.log(.5 * (special.erf((r - alpha) / zeta_ln10) + 1))
#plt.plot(r/sigma,Verf/epsilon, label='Xi ln', color='red')
plt.plot(r / sigma, Verf / KbT, label='Xi ln10 V/T', color='purple')

#Plot erf with Eric's Xi
Verf = -KbT * np.log(.5 * (special.erf((r - alpha) / zeta_Eric) + 1))
Example no. 35
 def mu_q_ex(e, m_xi, std_xi, m_la):
     return e * (0.5 - 0.5 * erf(0.5 * math.sqrt(2) * (e - m_xi) / std_xi)) * m_la
Example no. 36
def get_gdp_comment(ecodict, ecomodel, econexposure, event_year, epicode):
    """Create a comment on the GDP impact of a given event in the most impacted country.

    :param ecodict:
      Dictionary containing country code keys and integer population estimations of economic loss.
    :param ecomodel:
      Instance of the EmpiricalLoss class.
    :param econexposure:
      Dictionary containing country code (ISO2) keys, and values of
      10 element arrays representing population exposure to MMI 1-10.
      Dictionary will contain an additional key 'Total', with value of exposure across all countries.
    :param event_year:
      Year in which event occurred.
    :param epicode:
      Two letter country code of epicenter or 'UK' if not a country (usu. ocean).
    :returns:
      A string which indicates what fraction of the country's GDP the losses represent.
    """
    # get the gdp comment
    # get the G value for the economic losses
    eco_gvalue = ecomodel.getCombinedG(ecodict)
    # get the country code of the country with the highest losses
    dccode = ''
    dmax = 0
    expected = ecodict['TotalDollars'] / 1e6
    if ecodict['TotalDollars'] > 0:
        for ccode, value in ecodict.items():
            if ccode == 'TotalDollars':
                continue
            if value > dmax:
                dmax = value
                dccode = ccode
    else:
        # how do I compare economic exposure between countries?
        # do I want to compare that, or just grab the country of epicenter?
        for ccode, value in ecodict.items():
            if ccode == 'TotalDollars':
                continue
            rates = ecomodel.getLossRates(ccode, np.arange(1, 10))
            emploss = np.nansum(rates * value)
            if emploss > dmax:
                dmax = emploss
                dccode = ccode

    if dccode == '':
        dccode = epicode
    gdp_obj = GDP.fromDefault()
    gdp, outccode = gdp_obj.getGDP(dccode, event_year)
    country = Country()
    logging.info('ccode: %s, dccode: %s, outccode: %s' %
                 (ccode, dccode, outccode))
    cinfo = country.getCountry(outccode)
    if cinfo != 'UK':
        pop = cinfo['Population']
    else:
        pop = 0
    T = (pop * gdp) / 1e6
    if T == 0:
        return ''
    percent = erf(1 / np.sqrt(2))
    plow = round(
        np.exp(np.log(max(expected, EPS)) - eco_gvalue * invphi(percent)))
    phigh = round(
        np.exp(eco_gvalue * invphi(percent) + np.log(max(expected, EPS))))
    if plow != 0:
        ptlow = int(plow * 1e2 / T)
    else:
        ptlow = 0
    if phigh != 0:
        pthigh = int(phigh * 1e2 / T)
    else:
        pthigh = 0
    if dccode in ['XF', 'EU', 'WU']:
        cname = 'the United States'
    else:
        cname = cinfo['Name']
    if pthigh < 1.0:
        strtxt = 'Estimated economic losses are less than 1%% of GDP of %s.' % cname
    else:
        if ptlow < 100:
            ptlow = set_num_precision(ptlow, 1)
        else:
            ptlow = set_num_precision(ptlow, 2)
        if pthigh < 100:
            pthigh = set_num_precision(pthigh, 1)
        else:
            pthigh = set_num_precision(pthigh, 2)
        if pthigh >= 100:
            strtxt = 'Estimated economic losses may exceed the GDP of %s.' % cname
        else:
            strtxt = 'Estimated economic losses are %i-%i%% GDP of %s.' % (
                ptlow, pthigh, cname)
    return strtxt
Example no. 37
def probit_inv(a):  #probit regression
    return 0.5 + 0.5 * erf(a / np.sqrt(2))
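
# The inverse relation (an addition): probit(p) = sqrt(2) * erfinv(2p - 1)
# recovers the argument, confirming probit_inv is the standard normal CDF.
import numpy as np
from scipy.special import erf, erfinv

p = probit_inv(1.3)
assert abs(np.sqrt(2.0) * erfinv(2.0 * p - 1.0) - 1.3) < 1e-12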
Example no. 38
def hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10,
        depo_veloc=None, rain_accum_period=pd.Timedelta('1h')):
    """
    Calculates soiling ratio given particulate and rain data using the
    Fixed Velocity model from Humboldt State University (HSU).

    The HSU soiling model [1]_ returns the soiling ratio, a value between zero
    and one which is equivalent to (1 - transmission loss). Therefore a soiling
    ratio of 1.0 is equivalent to zero transmission loss.

    Parameters
    ----------

    rainfall : Series
        Rain accumulated in each time period. [mm]

    cleaning_threshold : float
        Amount of rain in an accumulation period needed to clean the PV
        modules. [mm]

    tilt : float
        Tilt of the PV panels from horizontal. [degree]

    pm2_5 : numeric
        Concentration of airborne particulate matter (PM) with
        aerodynamic diameter less than 2.5 microns. [g/m^3]

    pm10 : numeric
        Concentration of airborne particulate matter (PM) with
        aerodynamic diameter less than 10 microns. [g/m^3]

    depo_veloc : dict, default {'2_5': 0.0009, '10': 0.004}
        Deposition or settling velocity of particulates. [m/s]

    rain_accum_period : Timedelta, default 1 hour
        Period for accumulating rainfall to check against `cleaning_threshold`
        It is recommended that `rain_accum_period` be between 1 hour and
        24 hours.

    Returns
    -------
    soiling_ratio : Series
        Values between 0 and 1. Equal to 1 - transmission loss.

    References
    -----------
    .. [1] M. Coello and L. Boyle, "Simple Model For Predicting Time Series
       Soiling of Photovoltaic Panels," in IEEE Journal of Photovoltaics.
       doi: 10.1109/JPHOTOV.2019.2919628
    .. [2] Atmospheric Chemistry and Physics: From Air Pollution to Climate
       Change. J. Seinfeld and S. Pandis. Wiley and Sons 2001.

    """
    try:
        from scipy.special import erf
    except ImportError:
        raise ImportError("The pvlib.soiling.hsu function requires scipy.")

    # never use mutable input arguments
    if depo_veloc is None:
        depo_veloc = {'2_5': 0.0009, '10': 0.004}

    # accumulate rainfall into periods for comparison with threshold
    accum_rain = rainfall.rolling(rain_accum_period, closed='right').sum()
    # cleaning is True for intervals with rainfall greater than threshold
    cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]

    # determine the time intervals in seconds (dt_sec)
    dt = rainfall.index
    # subtract shifted values from original and convert to seconds
    dt_diff = (dt[1:] - dt[:-1]).total_seconds()
    # ensure same number of elements in the array, assuming that the interval
    # prior to the first value is equal in length to the first interval
    dt_sec = np.append(dt_diff[0], dt_diff).astype('float64')

    horiz_mass_rate = (
        pm2_5 * depo_veloc['2_5'] + np.maximum(pm10 - pm2_5, 0.)
        * depo_veloc['10']) * dt_sec
    tilted_mass_rate = horiz_mass_rate * cosd(tilt)  # assuming no rain

    # tms -> tilt_mass_rate
    tms_cumsum = np.cumsum(tilted_mass_rate * np.ones(rainfall.shape))

    mass_no_cleaning = pd.Series(index=rainfall.index, data=tms_cumsum)
    # specify dtype so pandas doesn't assume object
    mass_removed = pd.Series(index=rainfall.index, dtype='float64')
    mass_removed[0] = 0.
    mass_removed[cleaning_times] = mass_no_cleaning[cleaning_times]
    accum_mass = mass_no_cleaning - mass_removed.ffill()

    soiling_ratio = 1 - 0.3437 * erf(0.17 * accum_mass**0.8473)

    return soiling_ratio
Example no. 39
def spike_probability(
    x
):  # firing probability for unit variance and zero mean, and threshold = x
    return .5 * (1 - erf(x / sqrt(2.)))
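
# Cross-check against scipy (an addition): the expression equals the upper
# tail of the standard normal, norm.sf(x).
from numpy import sqrt
from scipy.special import erf
from scipy.stats import norm

assert abs(spike_probability(1.5) - norm.sf(1.5)) < 1e-12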
Example no. 40
def approxLognormal(N, mu=0.0, sigma=1.0, tail_N=0, tail_bound=[0.02,0.98], tail_order=np.e):
    '''
    Construct a discrete approximation to a lognormal distribution whose underlying
    normal distribution is N(mu, sigma).  Makes an equiprobable distribution by
    default, but user can optionally request augmented tails with exponentially
    sized point masses.  This can improve solution accuracy in some models.
    
    Parameters
    ----------
    N: int
        Number of discrete points in the "main part" of the approximation.
    mu: float
        Mean of underlying normal distribution.
    sigma: float
        Standard deviation of underlying normal distribution.
    tail_N: int
        Number of points in each "tail part" of the approximation; 0 = no tail.
    tail_bound: [float]
        CDF boundaries of the tails vs main portion; tail_bound[0] is the lower
        tail bound, tail_bound[1] is the upper tail bound.  Inoperative when
        tail_N = 0.  Can make "one tailed" approximations with 0.0 or 1.0.
    tail_order: float
        Factor by which consecutive point masses in a "tail part" differ in
        probability.  Should be >= 1 for sensible spacing.
        
    Returns
    -------
    pmf: np.ndarray
        Probabilities for discrete probability mass function.
    X: np.ndarray
        Discrete values in probability mass function.
        
    Written by Luca Gerotto
    Based on Matlab function "setup_workspace.m," from Chris Carroll's
      [Solution Methods for Microeconomic Dynamic Optimization Problems]
      (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
    Latest update: 21 April 2016 by Matthew N. White
    '''
    # Find the CDF boundaries of each segment
    if sigma > 0.0:        
        if tail_N > 0:
            lo_cut     = tail_bound[0]
            hi_cut     = tail_bound[1]
        else:
            lo_cut     = 0.0
            hi_cut     = 1.0
        inner_size     = hi_cut - lo_cut
        inner_CDF_vals = [lo_cut + x*N**(-1.0)*inner_size for x in range(1, N)]
        if inner_size < 1.0:
            scale      = 1.0/tail_order
            mag        = (1.0-scale**tail_N)/(1.0-scale)
        lower_CDF_vals = [0.0]
        if lo_cut > 0.0:
            for x in range(tail_N-1,-1,-1):
                lower_CDF_vals.append(lower_CDF_vals[-1] + lo_cut*scale**x/mag)
        upper_CDF_vals  = [hi_cut]
        if hi_cut < 1.0:
            for x in range(tail_N):
                upper_CDF_vals.append(upper_CDF_vals[-1] + (1.0-hi_cut)*scale**x/mag)
        CDF_vals       = lower_CDF_vals + inner_CDF_vals + upper_CDF_vals
        temp_cutoffs   = list(stats.lognorm.ppf(CDF_vals[1:-1], s=sigma, loc=0, 
                                                scale=np.exp(mu)))
        cutoffs        = [0] + temp_cutoffs + [np.inf]
        CDF_vals       = np.array(CDF_vals)
    
        # Construct the discrete approximation by finding the average value within each segment
        K              = CDF_vals.size-1 # number of points in approximation
        pmf            = CDF_vals[1:(K+1)] - CDF_vals[0:K]
        X              = np.zeros(K)
        for i in range(K):
            zBot  = cutoffs[i]
            zTop = cutoffs[i+1]
            X[i] = (-0.5)*np.exp(mu+(sigma**2)*0.5)*(erf((mu+sigma**2-np.log(zTop))*(
                   (np.sqrt(2)*sigma)**(-1)))-erf((mu+sigma**2-np.log(zBot))*((np.sqrt(2)*sigma)
                   **(-1))))*(pmf[i]**(-1))           
    else:
        pmf = np.ones(N)/N
        X   = np.exp(mu)*np.ones(N)
    return [pmf, X]
Example no. 41
def shock_sol(x):
    eps = 1e-3
    k = np.sqrt(2 * eps)
    return np.cos(np.pi * x) + erf(x / k) / erf(1 / k)
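
# Evaluation sketch (an addition): away from x = 0 the erf term saturates,
# so the solution looks like cos(pi*x) - 1 to the left of the layer and
# cos(pi*x) + 1 to the right, with an interior layer of width
# ~ sqrt(2*eps) ~ 0.045 around the origin.
import numpy as np
from scipy.special import erf

x = np.linspace(-1.0, 1.0, 401)
u = shock_sol(x)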
Example no. 42
 def time_real(self, offset):
     erf(self.rand + offset)
Example no. 43
def errorf(x, *p):
    a, mu, sigma = p
    return 0.5 * a * (1.0 + erf((x - mu) / sigma))
Example no. 44
def cpij(gij, rc):
    cp = -2.0*rc*sqrt(gij/pi)*exp(-gij*rc*rc)
    cp += erf(sqrt(gij)*rc)
    return cp
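
# Limit check (an addition): for large cutoff rc the erf term tends to 1 and
# the Gaussian tail vanishes, so cpij -> 1. Assumes sqrt, exp, pi and erf are
# in scope, as the function requires.
from math import sqrt, exp, pi
from scipy.special import erf

assert abs(cpij(gij=0.5, rc=12.0) - 1.0) < 1e-12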
Example no. 45
def smooth_heaviside(x):
    return 0.5*(1+erf(x))
Example no. 46
        self.c = kwargs.get("c", 1.0)
        # sigmoid function bandwidth must be greater than zero
        assert self.c > 0.

    def __call__(self, x):
        """
        Returns
        -------
        :param numpy.ndarray y:
            Backarc scaling term
        """
        return 1. / (1. + np.exp(-(1. / self.c) * x))


# Get Gaussian cdf of a standard normal distribution
phix = lambda x: 0.5 * (1.0 + erf(x / np.sqrt(2.)))


class FABATaperGaussian(FABATaperStep):
    """
    Implements tapering of x according to a truncated Gaussian function

    :param float sigma:
        `Bandwidth' of function (according to a Gaussian standard deviation)
    :param float a:
        Initiation point of tapering (km)
    :param float b:
        Termination point of tapering (km)
    """
    def __init__(self, **kwargs):
Example no. 47
from scipy.optimize import curve_fit
from scipy.special import erf
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
from scipy import ndimage
import cv2
from skimage.color import rgb2gray
from astropy.convolution import Gaussian2DKernel, convolve

gauss_response = lambda x, x0, sigma, A: A * 0.5 * (1 + erf(
    (x - x0) / (sigma**2 * np.sqrt(2))))
gauss_response_inv = lambda x, x0, sigma, A: 1 * A - gauss_response(
    x, x0, sigma, A)


def fit_gaussian_response(x, y, inverse=False):
    if inverse:
        p, pcov = curve_fit(gauss_response_inv,
                            x,
                            y,
                            p0=[x[np.argmax(y)], 1,
                                np.max(y)])
    else:
        p, pcov = curve_fit(gauss_response,
                            x,
                            y,
                            p0=[x[np.argmax(y)], 1,
                                np.max(y)])
    return p
Example no. 48
def load(load_filename):
    with h5py.File(load_filename, "r") as h5f:
        num_averages = h5f.attrs["num_averages"]
        readout_freq = h5f.attrs["readout_freq"]
        control_freq = h5f.attrs["control_freq"]
        readout_duration = h5f.attrs["readout_duration"]
        control_duration = h5f.attrs["control_duration"]
        readout_amp = h5f.attrs["readout_amp"]
        control_amp = h5f.attrs["control_amp"]
        sample_duration = h5f.attrs["sample_duration"]
        wait_delay = h5f.attrs["wait_delay"]
        readout_sample_delay = h5f.attrs["readout_sample_delay"]
        match_t_in_store = h5f.attrs["match_t_in_store"]
        t_arr = h5f["t_arr"][()]
        store_arr = h5f["store_arr"][()]
        match_g_data = h5f["match_g_data"][()]
        match_e_data = h5f["match_e_data"][()]
        template_g = h5f["template_g"][()]
        template_e = h5f["template_e"][()]
        source_code = h5f["source_code"][()]

    nr_samples = len(t_arr)
    t_span = nr_samples * (t_arr[1] - t_arr[0])
    match_idx = np.argmin(np.abs(t_arr - match_t_in_store))
    match_len = len(template_g)
    t_low = t_arr[match_idx]
    t_high = t_arr[match_idx + match_len]

    # Plot raw store data for first iteration as a check
    fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)
    ax11, ax12 = ax1
    ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
    ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
    ax11.plot(1e9 * t_arr, np.abs(store_arr[0, 0, :]))
    ax11.plot(1e9 * t_arr, np.abs(store_arr[1, 0, :]))
    ax12.plot(1e9 * t_arr, np.angle(store_arr[0, 0, :]))
    ax12.plot(1e9 * t_arr, np.angle(store_arr[1, 0, :]))
    ax12.set_xlabel("Time [ns]")
    fig1.show()

    # # Analyze
    threshold = 0.5 * (norm(template_e)**2 - norm(template_g)**2)
    match_diff = match_e_data - match_g_data - threshold  # does |e> match better than |g>?
    match_diff_g = match_diff[0::2]  # qubit was prepared in |g>
    match_diff_e = match_diff[1::2]  # qubit was prepared in |e>
    idx_low_g = match_diff_g < 0
    idx_high_g = np.logical_not(idx_low_g)
    idx_low_e = match_diff_e < 0
    idx_high_e = np.logical_not(idx_low_e)
    mean_low_g = match_diff_g[idx_low_g].mean()
    mean_high_g = match_diff_g[idx_high_g].mean()
    mean_low_e = match_diff_e[idx_low_e].mean()
    mean_high_e = match_diff_e[idx_high_e].mean()
    std_low_g = match_diff_g[idx_low_g].std()
    std_high_g = match_diff_g[idx_high_g].std()
    std_low_e = match_diff_e[idx_low_e].std()
    std_high_e = match_diff_e[idx_high_e].std()
    weight_low_g = np.sum(idx_low_g) / len(idx_low_g)
    weight_high_g = 1.0 - weight_low_g
    weight_low_e = np.sum(idx_low_e) / len(idx_low_e)
    weight_high_e = 1.0 - weight_low_e
    std = max(std_low_g, std_high_g, std_low_e, std_high_e)
    x_min = min(mean_low_g, mean_low_e) - 5 * std
    x_max = max(mean_high_g, mean_high_e) + 5 * std

    H_g, xedges = np.histogram(match_diff_g,
                               bins=100,
                               range=(x_min, x_max),
                               density=True)
    H_e, xedges = np.histogram(match_diff_e,
                               bins=100,
                               range=(x_min, x_max),
                               density=True)
    xdata = 0.5 * (xedges[1:] + xedges[:-1])

    init_g = np.array([
        mean_low_g, std_low_g, weight_low_g, mean_high_g, std_high_g,
        weight_high_g
    ])
    init_e = np.array([
        mean_low_e, std_low_e, weight_low_e, mean_high_e, std_high_e,
        weight_high_e
    ])
    if FIXED:
        # skip second weight
        popt_g, pcov_g = curve_fit(double_gaussian_fixed, xdata, H_g, p0=init_g[:-1])
        popt_e, pcov_e = curve_fit(double_gaussian_fixed, xdata, H_e, p0=init_e[:-1])
        # add back second weight for ease of use
        popt_g = np.r_[popt_g, 1.0 - popt_g[2]]
        popt_e = np.r_[popt_e, 1.0 - popt_e[2]]
    else:
        popt_g, pcov_g = curve_fit(double_gaussian, xdata, H_g, p0=init_g)
        popt_e, pcov_e = curve_fit(double_gaussian, xdata, H_e, p0=init_e)
    fidelity_g = 0.5 * (1 + erf((0.0 - popt_g[0]) / np.sqrt(2 * popt_g[1]**2)))
    fidelity_e = 1.0 - 0.5 * (1 + erf(
        (0.0 - popt_e[3]) / np.sqrt(2 * popt_e[4]**2)))

    fig2, ax2 = plt.subplots(1,
                             2,
                             sharex=True,
                             sharey=True,
                             tight_layout=True,
                             figsize=(12.8, 4.8))
    ax21, ax22 = ax2
    for ax_ in ax2:
        ax_.axvline(0.0, c="tab:gray", alpha=0.25)
        ax_.axhline(0.0, c="tab:gray", alpha=0.25)

    hist_plot(ax21, H_g, xedges, lw=1)
    ax21.plot(xdata, double_gaussian(xdata, *popt_g), c="k")
    ax21.plot(xdata,
              single_gaussian(xdata, *popt_g[:3]),
              ls="--",
              label=f"$\\left|\\mathrm{{g}}\\right>$: {popt_g[2]:.1%}")
    ax21.plot(xdata,
              single_gaussian(xdata, *popt_g[3:]),
              ls="--",
              label=f"$\\left|\\mathrm{{e}}\\right>$: {popt_g[5]:.1%}")
    ax21.set_xlabel("Comparator result")
    ax21.set_title(
        f"Qubit prepared in $\\left|\\mathrm{{g}}\\right>$: $\\mathcal{{F}}$ = {fidelity_g:.1%}"
    )
    ax21.legend(title="Qubit measured in")

    hist_plot(ax22, H_e, xedges, lw=1)
    ax22.plot(xdata, double_gaussian(xdata, *popt_e), c="k")
    ax22.plot(xdata,
              single_gaussian(xdata, *popt_e[:3]),
              ls="--",
              label=f"$\\left|\\mathrm{{g}}\\right>$: {popt_e[2]:.1%}")
    ax22.plot(xdata,
              single_gaussian(xdata, *popt_e[3:]),
              ls="--",
              label=f"$\\left|\\mathrm{{e}}\\right>$: {popt_e[5]:.1%}")
    ax22.set_xlabel("Comparator result")
    ax22.set_title(
        f"Qubit prepared in $\\left|\\mathrm{{e}}\\right>$: $\\mathcal{{F}}$ = {fidelity_e:.1%}"
    )
    ax22.legend(title="Qubit measured in")

    fig2.show()

    print(popt_g)
    print(popt_e)

    return fig1, fig2
Example no. 49
 def threshold_negative(self, y, indices):
     y = y[indices]
     thresholds = self.thresholds[indices]
     return y + thresholds - 2 * thresholds / math.pi * special.erf(
         -self.alpha * (y + thresholds) / thresholds)
Example no. 50
 def ifunc(x):
     """Integral of func, ifunc(-inf) = 0"""
     from scipy.special import erf
     return np.sqrt(np.pi) / 2 * (erf(x) + 1)
Example no. 51
def pi(x, b):                          #  0        1     2    3  4
    # b is np.array of these parameters: [sigma_1, x_sh, bkg, B, b]
    s_1 = 0.5*b[3]/( b[0] )
    s_2 = b[0]*erf( (b[4]-x-b[1])/(sqrt(2)*b[0]) )
    s_3 = b[0]*erf( (-b[4]-x-b[1])/(sqrt(2)*b[0]) )
    return s_1*(s_2 - s_3) + b[2]     # x in mm
Example no. 52
def normal_to_uniform(u, a, b):
    x = np.zeros(shape=(u.shape[0], u.shape[1]))
    for i in range(u.shape[1]):
        p = 0.5 + erf(((u[:, i] - 0) / 1) / np.sqrt(2)) / 2
        x[:, i] = a + (b - a) * p
    return x
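
# Equivalence check (an addition): the per-column loop reduces to the closed
# form a + (b - a) * norm.cdf(u), since 0.5 + erf(u/sqrt(2))/2 is the
# standard normal CDF.
import numpy as np
from scipy.special import erf
from scipy.stats import norm

u = np.random.standard_normal((1000, 3))
x = normal_to_uniform(u, a=-1.0, b=1.0)
assert np.allclose(x, -1.0 + 2.0 * norm.cdf(u))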
Example no. 53
def scatterplot(x, y, *args, **kwargs):
    """
    NAME:

       scatterplot

    PURPOSE:

       make a 'smart' scatterplot that is a density plot in high-density
       regions and a regular scatterplot for outliers

    INPUT:

       x, y

       xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed

       ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed

       xrange

       yrange

       bins - number of bins to use in each dimension

       weights - data-weights

       aspect - aspect ratio

       conditional - normalize each column separately (for probability densities, i.e., cntrmass=True)

       contours - if False, don't plot contours

       justcontours - if True, only draw contours, no density

       cntrcolors - color of contours (can be array as for bovy_dens2d)

       cntrlw, cntrls - linewidths and linestyles for contour

       cntrSmooth - use ndimage.gaussian_filter to smooth before contouring

       levels - contour-levels; data points outside of the last level will be individually shown (so, e.g., if this list is descending, contours and data points will be overplotted)

       onedhists - if True, make one-d histograms on the sides

       onedhistx - if True, make one-d histograms on the side of the x distribution

       onedhisty - if True, make one-d histograms on the side of the y distribution

       onedhistcolor, onedhistfc, onedhistec

       onedhistxnormed, onedhistynormed - normed keyword for one-d histograms
       
       onedhistxweights, onedhistyweights - weights keyword for one-d histograms

       cmap= cmap for density plot

       hist= and edges= - you can supply the histogram of the data yourself, this can be useful if you want to censor the data, both need to be set and calculated using scipy.histogramdd with the given range

       retAxes= return all Axes instances

    OUTPUT:

       plot to output device, Axes instance(s) or not, depending on input

    HISTORY:

       2010-04-15 - Written - Bovy (NYU)

    """
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)
    if 'xrange' in kwargs:
        xrange = kwargs.pop('xrange')
    else:
        if isinstance(x, list): xrange = [sc.amin(x), sc.amax(x)]
        else: xrange = [x.min(), x.max()]
    if 'yrange' in kwargs:
        yrange = kwargs.pop('yrange')
    else:
        if isinstance(y, list): yrange = [sc.amin(y), sc.amax(y)]
        else: yrange = [y.min(), y.max()]
    ndata = len(x)
    bins = kwargs.pop('bins', round(0.3 * sc.sqrt(ndata)))
    weights = kwargs.pop('weights', None)
    levels = kwargs.pop('levels', special.erf(sc.arange(1, 4) / sc.sqrt(2.)))
    aspect = kwargs.pop('aspect',
                        (xrange[1] - xrange[0]) / (yrange[1] - yrange[0]))
    conditional = kwargs.pop('conditional', False)
    contours = kwargs.pop('contours', True)
    justcontours = kwargs.pop('justcontours', False)
    cntrcolors = kwargs.pop('cntrcolors', 'k')
    cntrlw = kwargs.pop('cntrlw', None)
    cntrls = kwargs.pop('cntrls', None)
    cntrSmooth = kwargs.pop('cntrSmooth', None)
    onedhists = kwargs.pop('onedhists', False)
    onedhistx = kwargs.pop('onedhistx', onedhists)
    onedhisty = kwargs.pop('onedhisty', onedhists)
    onedhisttype = kwargs.pop('onedhisttype', 'step')
    onedhistcolor = kwargs.pop('onedhistcolor', 'k')
    onedhistfc = kwargs.pop('onedhistfc', 'w')
    onedhistec = kwargs.pop('onedhistec', 'k')
    onedhistls = kwargs.pop('onedhistls', 'solid')
    onedhistlw = kwargs.pop('onedhistlw', None)
    onedhistsbins = kwargs.pop('onedhistsbins', round(0.3 * sc.sqrt(ndata)))
    overplot = kwargs.pop('overplot', False)
    cmap = kwargs.pop('cmap', cm.gist_yarg)
    onedhistxnormed = kwargs.pop('onedhistxnormed', True)
    onedhistynormed = kwargs.pop('onedhistynormed', True)
    onedhistxweights = kwargs.pop('onedhistxweights', weights)
    onedhistyweights = kwargs.pop('onedhistyweights', weights)
    retAxes = kwargs.pop('retAxes', False)
    if onedhists or onedhistx or onedhisty:
        if overplot: fig = pyplot.gcf()
        else: fig = pyplot.figure()
        nullfmt = NullFormatter()  # no labels
        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left + width
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]
        axScatter = pyplot.axes(rect_scatter)
        if onedhistx:
            axHistx = pyplot.axes(rect_histx)
            # no labels
            axHistx.xaxis.set_major_formatter(nullfmt)
            axHistx.yaxis.set_major_formatter(nullfmt)
        if onedhisty:
            axHisty = pyplot.axes(rect_histy)
            # no labels
            axHisty.xaxis.set_major_formatter(nullfmt)
            axHisty.yaxis.set_major_formatter(nullfmt)
        fig.sca(axScatter)
    data = sc.array([x, y]).T
    if 'hist' in kwargs and 'edges' in kwargs:
        hist = kwargs['hist']
        kwargs.pop('hist')
        edges = kwargs['edges']
        kwargs.pop('edges')
    else:
        hist, edges = sc.histogramdd(data,
                                     bins=bins,
                                     range=[xrange, yrange],
                                     weights=weights)
    if contours:
        cumimage = bovy_dens2d(hist.T,
                               contours=contours,
                               levels=levels,
                               cntrmass=contours,
                               cntrSmooth=cntrSmooth,
                               cntrcolors=cntrcolors,
                               cmap=cmap,
                               origin='lower',
                               xrange=xrange,
                               yrange=yrange,
                               xlabel=xlabel,
                               ylabel=ylabel,
                               interpolation='nearest',
                               retCumImage=True,
                               aspect=aspect,
                               conditional=conditional,
                               cntrlw=cntrlw,
                               cntrls=cntrls,
                               justcontours=justcontours,
                               zorder=5 * justcontours,
                               overplot=(onedhists or overplot or onedhistx
                                         or onedhisty))
    else:
        cumimage = bovy_dens2d(hist.T,
                               contours=contours,
                               cntrcolors=cntrcolors,
                               cmap=cmap,
                               origin='lower',
                               xrange=xrange,
                               yrange=yrange,
                               xlabel=xlabel,
                               ylabel=ylabel,
                               interpolation='nearest',
                               conditional=conditional,
                               retCumImage=True,
                               aspect=aspect,
                               cntrlw=cntrlw,
                               cntrls=cntrls,
                               overplot=(onedhists or overplot or onedhistx
                                         or onedhisty))
    #Set axes and labels
    pyplot.axis(list(xrange) + list(yrange))
    if not overplot:
        _add_axislabels(xlabel, ylabel)
        _add_ticks()
    # bin centers along each axis
    xedge = edges[0]
    binxs = 0.5 * (xedge[:-1] + xedge[1:])
    yedge = edges[1]
    binys = 0.5 * (yedge[:-1] + yedge[1:])
    cumInterp = interpolate.RectBivariateSpline(binxs,
                                                binys,
                                                cumimage.T,
                                                kx=1,
                                                ky=1)
    cums = cumInterp.ev(x, y)
    plotx = x[cums > levels[-1]]
    ploty = y[cums > levels[-1]]
    if len(plotx) != 0:
        if weights is not None:
            w8 = weights[cums > levels[-1]]
            for ii in range(len(plotx)):
                bovy_plot(plotx[ii],
                          ploty[ii],
                          overplot=True,
                          color='%.2f' % (1. - w8[ii]),
                          *args,
                          **kwargs)
        else:
            bovy_plot(plotx, ploty, overplot=True, zorder=1, *args, **kwargs)
    #Add onedhists
    if not (onedhists or onedhistx or onedhisty):
        if retAxes:
            return pyplot.gca()
        else:
            return None
    if onedhistx:
        histx, edges, patches = axHistx.hist(x,
                                             bins=onedhistsbins,
                                             density=onedhistxnormed,
                                             weights=onedhistxweights,
                                             histtype=onedhisttype,
                                             range=sorted(xrange),
                                             color=onedhistcolor,
                                             fc=onedhistfc,
                                             ec=onedhistec,
                                             ls=onedhistls,
                                             lw=onedhistlw)
    if onedhisty:
        histy, edges, patches = axHisty.hist(y,
                                             bins=onedhistsbins,
                                             orientation='horizontal',
                                             weights=onedhistyweights,
                                             density=onedhistynormed,
                                             histtype=onedhisttype,
                                             range=sorted(yrange),
                                             color=onedhistcolor,
                                             fc=onedhistfc,
                                             ec=onedhistec,
                                             ls=onedhistls,
                                             lw=onedhistlw)
    if onedhistx and not overplot:
        axHistx.set_xlim(axScatter.get_xlim())
        axHistx.set_ylim(0, 1.2 * sc.amax(histx))
    if onedhisty and not overplot:
        axHisty.set_ylim(axScatter.get_ylim())
        axHisty.set_xlim(0, 1.2 * sc.amax(histy))
    if not onedhistx: axHistx = None
    if not onedhisty: axHisty = None
    if retAxes:
        return (axScatter, axHistx, axHisty)
    else:
        return None
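# The default `levels` above are the Gaussian 1-, 2- and 3-sigma enclosed-mass
# fractions; a quick standalone check, independent of the plotting code:
import numpy as np
from scipy import special

print(special.erf(np.arange(1, 4) / np.sqrt(2.)))  # [0.6827 0.9545 0.9973]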
def threshold_positive(self, y, indices):
    y = y[indices]
    thresholds = self.thresholds[indices]
    return y - thresholds + 2 * thresholds / math.pi * special.erf(
        self.alpha * (y - thresholds) / thresholds)
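# The method above applies a smooth, erf-based soft threshold: exactly zero at
# y == threshold, approaching y - t + 2t/pi far above it. A standalone sketch
# (`thresholds` and `alpha` stand in for the instance attributes, purely for
# illustration):
import math
import numpy as np
from scipy import special

def smooth_threshold(y, thresholds, alpha=1.0):
    return y - thresholds + 2 * thresholds / math.pi * special.erf(
        alpha * (y - thresholds) / thresholds)

print(smooth_threshold(np.array([0.5, 1.0, 2.0]), thresholds=1.0))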
Example n. 55
import numpy as np
import scipy.special as sp
import matplotlib.pyplot as plt


# Function to be integrated
def f(x, alpha):
    return np.exp(-alpha * x**2)


def trap(x, y):
    h = np.abs(x[1] - x[0])
    return h * (y[0] + y[-1]) / 2 + np.sum(h * y[1:-1])


# Actual integration with the trapezoidal rule
interval = [-1, 1]
alpha = 1
N = 10000
x = np.linspace(interval[0], interval[1], N + 1)
y = f(x, alpha)
I = trap(x, y)
exact = np.sqrt(np.pi / alpha) * sp.erf(np.sqrt(alpha) * interval[1])  # exact result for the symmetric interval [-a, a]

print("Exact: ", exact)
print("Numerical: ", I)
print("Error: ", exact - I)
Example n. 56
from numpy import cos, pi, sqrt
from scipy.special import erf


def integral(x, beta=10):
    return sqrt(pi) / (2 * beta) * erf(beta * x) - cos(x)
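# Quick verification (my addition): `integral` is an antiderivative of
# f(x) = exp(-(beta*x)**2) + sin(x), so its differences should match
# scipy.integrate.quad over the same interval.
import numpy as np
from scipy import integrate

beta = 10
f = lambda t: np.exp(-(beta * t) ** 2) + np.sin(t)
val, _ = integrate.quad(f, 0.0, 1.0)
print(val, integral(1.0, beta) - integral(0.0, beta))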
Example n. 57
import math
from collections import deque

import numpy
from numpy.fft import rfft, irfft
from scipy.special import erf

import brian2 as b2
from brian2 import (NeuronGroup, PoissonInput, PopulationRateMonitor,
                    SpikeMonitor, StateMonitor, Synapses, network_operation)


def simulate_wm(N_excitatory=1024,
                N_inhibitory=256,
                N_extern_poisson=1000,
                poisson_firing_rate=1.4 * b2.Hz,
                weight_scaling_factor=2.,
                sigma_weight_profile=20.,
                Jpos_excit2excit=1.6,
                stimulus_center_deg=180,
                stimulus_width_deg=40,
                stimulus_strength=0.07 * b2.namp,
                t_stimulus_start=0 * b2.ms,
                t_stimulus_duration=0 * b2.ms,
                distractor_center_deg=90,
                distractor_width_deg=40,
                distractor_strength=0.0 * b2.namp,
                t_distractor_start=0 * b2.ms,
                t_distractor_duration=0 * b2.ms,
                G_inhib2inhib=1.024 * b2.nS,
                G_inhib2excit=1.336 * b2.nS,
                G_excit2excit=0.381 * b2.nS,
                G_excit2inhib=0.292 * b2.nS,
                monitored_subset_size=1024,
                sim_time=800. * b2.ms):
    """
    Args:
        N_excitatory (int): Size of the excitatory population
        N_inhibitory (int): Size of the inhibitory population
        weight_scaling_factor (float): weight prefactor. When increasing the size of the populations,
            the synaptic weights have to be decreased. Using the default values, we have
            N_excitatory*weight_scaling_factor = 2048 and N_inhibitory*weight_scaling_factor=512
        N_extern_poisson (int): Size of the external input population (Poisson input)
        poisson_firing_rate (Quantity): Firing rate of the external population
        sigma_weight_profile (float): standard deviation of the gaussian input profile in
            the excitatory population.
        Jpos_excit2excit (float): Strength of the recurrent input within the excitatory population.
            Jneg_excit2excit is computed from sigma_weight_profile, Jpos_excit2excit and the normalization
            condition.
        stimulus_center_deg (float): Center of the stimulus in [0, 360]
        stimulus_width_deg (float): width of the stimulus. All neurons in
            stimulus_center_deg +/- (stimulus_width_deg/2) receive the same input current
        stimulus_strength (Quantity): Input current to the neurons at stimulus_center_deg +/- (stimulus_width_deg/2)
        t_stimulus_start (Quantity): time when the input stimulus is turned on
        t_stimulus_duration (Quantity): duration of the stimulus.
        distractor_center_deg (float): Center of the distractor in [0, 360]
        distractor_width_deg (float): width of the distractor. All neurons in
            distractor_center_deg +/- (distractor_width_deg/2) receive the same input current
        distractor_strength (Quantity): Input current to the neurons at
            distractor_center_deg +/- (distractor_width_deg/2)
        t_distractor_start (Quantity): time when the distractor is turned on
        t_distractor_duration (Quantity): duration of the distractor.
        G_inhib2inhib (Quantity): projections from inhibitory to inhibitory population (later
            rescaled by weight_scaling_factor)
        G_inhib2excit (Quantity): projections from inhibitory to excitatory population (later
            rescaled by weight_scaling_factor)
        G_excit2excit (Quantity): projections from excitatory to excitatory population (later
            rescaled by weight_scaling_factor)
        G_excit2inhib (Quantity): projections from excitatory to inhibitory population (later
            rescaled by weight_scaling_factor)
        monitored_subset_size (int): nr of neurons for which a Spike- and Voltage monitor
            is registered.
        sim_time (Quantity): simulation time

    Returns:
        results (tuple):
            rate_monitor_excit, spike_monitor_excit, voltage_monitor_excit,
            idx_monitored_neurons_excit (monitors for the excitatory population),
            rate_monitor_inhib, spike_monitor_inhib, voltage_monitor_inhib,
            idx_monitored_neurons_inhib (monitors for the inhibitory population),
            weight_profile_45 (the weight profile for the neuron with preferred direction = 45 deg)
    """
    # specify the excitatory pyramidal cells:
    Cm_excit = 0.5 * b2.nF  # membrane capacitance of excitatory neurons
    G_leak_excit = 25.0 * b2.nS  # leak conductance
    E_leak_excit = -70.0 * b2.mV  # reversal potential
    v_firing_threshold_excit = -50.0 * b2.mV  # spike condition
    v_reset_excit = -60.0 * b2.mV  # reset voltage after spike
    t_abs_refract_excit = 2.0 * b2.ms  # absolute refractory period

    # specify the weight profile in the recurrent population:
    # sigma_weight_profile (a parameter above) is the std-dev of the Gaussian
    # weight profile around the preferred direction

    # specify the inhibitory interneurons:
    Cm_inhib = 0.2 * b2.nF
    G_leak_inhib = 20.0 * b2.nS
    E_leak_inhib = -70.0 * b2.mV
    v_firing_threshold_inhib = -50.0 * b2.mV
    v_reset_inhib = -60.0 * b2.mV
    t_abs_refract_inhib = 1.0 * b2.ms

    # specify the AMPA synapses
    E_AMPA = 0.0 * b2.mV
    tau_AMPA = .9 * 2.0 * b2.ms

    # specify the GABA synapses
    E_GABA = -70.0 * b2.mV
    tau_GABA = 10.0 * b2.ms

    # specify the NMDA synapses
    E_NMDA = 0.0 * b2.mV
    tau_NMDA_s = .65 * 100.0 * b2.ms  # orig: 100
    tau_NMDA_x = .94 * 2.0 * b2.ms
    alpha_NMDA = 0.5 * b2.kHz

    # projections from the external population
    G_extern2inhib = 2.38 * b2.nS
    G_extern2excit = 3.1 * b2.nS

    # projections from the inhibitory populations
    G_inhib2inhib *= weight_scaling_factor
    G_inhib2excit *= weight_scaling_factor

    # projections from the excitatory population
    G_excit2excit *= weight_scaling_factor
    G_excit2inhib *= weight_scaling_factor  # todo: verify this scaling

    t_stimulus_end = t_stimulus_start + t_stimulus_duration
    t_distractor_end = t_distractor_start + t_distractor_duration
    # compute the stimulus index
    stim_center_idx = int(round(N_excitatory / 360. * stimulus_center_deg))
    stim_width_idx = int(round(N_excitatory / 360. * stimulus_width_deg / 2))
    stim_target_idx = [
        idx % N_excitatory
        for idx in range(stim_center_idx - stim_width_idx, stim_center_idx +
                         stim_width_idx + 1)
    ]
    # compute the distractor index
    distr_center_idx = int(round(N_excitatory / 360. * distractor_center_deg))
    distr_width_idx = int(round(N_excitatory / 360. * distractor_width_deg /
                                2))
    distr_target_idx = [
        idx % N_excitatory
        for idx in range(distr_center_idx - distr_width_idx, distr_center_idx +
                         distr_width_idx + 1)
    ]

    # precompute the weight profile for the recurrent population
    tmp = math.sqrt(2. * math.pi) * sigma_weight_profile * erf(
        180. / math.sqrt(2.) / sigma_weight_profile) / 360.
    Jneg_excit2excit = (1. - Jpos_excit2excit * tmp) / (1. - tmp)
    presyn_weight_kernel = \
        [(Jneg_excit2excit +
          (Jpos_excit2excit - Jneg_excit2excit) *
          math.exp(-.5 * (360. * min(j, N_excitatory - j) / N_excitatory) ** 2 / sigma_weight_profile ** 2))
         for j in range(N_excitatory)]
    # validate the normalization condition: sum(presyn_weight_kernel) / N_excitatory should be ~1
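    # (the mean of the kernel is ~1 by construction, so the total recurrent
    # drive onto each neuron is independent of Jpos_excit2excit; this is
    # exactly how Jneg_excit2excit is derived above)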
    fft_presyn_weight_kernel = rfft(presyn_weight_kernel)
    weight_profile_45 = deque(presyn_weight_kernel)
    rot_dist = int(round(len(weight_profile_45) / 8))
    weight_profile_45.rotate(rot_dist)

    # define the inhibitory population
    inhib_lif_dynamics = """
        s_NMDA_total : 1  # the post synaptic sum of s. compare with s_NMDA_presyn
        dv/dt = (
        - G_leak_inhib * (v-E_leak_inhib)
        - G_extern2inhib * s_AMPA * (v-E_AMPA)
        - G_inhib2inhib * s_GABA * (v-E_GABA)
        - G_excit2inhib * s_NMDA_total  * (v-E_NMDA)/(1.0+1.0*exp(-0.062*1e3*v/volt)/3.57)
        )/Cm_inhib : volt (unless refractory)
        ds_AMPA/dt = -s_AMPA/tau_AMPA : 1
        ds_GABA/dt = -s_GABA/tau_GABA : 1
    """

    inhib_pop = NeuronGroup(N_inhibitory,
                            model=inhib_lif_dynamics,
                            threshold="v>v_firing_threshold_inhib",
                            reset="v=v_reset_inhib",
                            refractory=t_abs_refract_inhib,
                            method="rk2")
    # initialize with random voltages:
    inhib_pop.v = numpy.random.uniform(v_reset_inhib / b2.mV,
                                       high=v_firing_threshold_inhib / b2.mV,
                                       size=N_inhibitory) * b2.mV
    # set the connections: inhib2inhib
    syn_inhib2inhib = Synapses(inhib_pop,
                               target=inhib_pop,
                               on_pre="s_GABA += 1.0",
                               delay=0.0 * b2.ms)
    syn_inhib2inhib.connect(condition="i!=j", p=1.0)
    # set the connections: extern2inhib
    input_ext2inhib = PoissonInput(target=inhib_pop,
                                   target_var="s_AMPA",
                                   N=N_extern_poisson,
                                   rate=poisson_firing_rate,
                                   weight=1.0)

    # specify the excitatory population:
    excit_lif_dynamics = """
        I_stim : amp
        s_NMDA_total : 1  # the post synaptic sum of s. compare with s_NMDA_presyn
        dv/dt = (
        - G_leak_excit * (v-E_leak_excit)
        - G_extern2excit * s_AMPA * (v-E_AMPA)
        - G_inhib2excit * s_GABA * (v-E_GABA)
        - G_excit2excit * s_NMDA_total * (v-E_NMDA)/(1.0+1.0*exp(-0.062*1e3*v/volt)/3.57)
        + I_stim
        )/Cm_excit : volt (unless refractory)
        ds_AMPA/dt = -s_AMPA/tau_AMPA : 1
        ds_GABA/dt = -s_GABA/tau_GABA : 1
        ds_NMDA/dt = -s_NMDA/tau_NMDA_s + alpha_NMDA * x * (1-s_NMDA) : 1
        dx/dt = -x/tau_NMDA_x : 1
    """

    excit_pop = NeuronGroup(N_excitatory,
                            model=excit_lif_dynamics,
                            threshold="v>v_firing_threshold_excit",
                            reset="v=v_reset_excit; x+=1.0",
                            refractory=t_abs_refract_excit,
                            method="rk2")
    # initialize with random voltages:
    excit_pop.v = numpy.random.uniform(v_reset_excit / b2.mV,
                                       high=v_firing_threshold_excit / b2.mV,
                                       size=N_excitatory) * b2.mV
    excit_pop.I_stim = 0. * b2.namp
    # set the connections: extern2excit
    input_ext2excit = PoissonInput(target=excit_pop,
                                   target_var="s_AMPA",
                                   N=N_extern_poisson,
                                   rate=poisson_firing_rate,
                                   weight=1.0)

    # set the connections: inhibitory to excitatory
    syn_inhib2excit = Synapses(inhib_pop,
                               target=excit_pop,
                               on_pre="s_GABA += 1.0")
    syn_inhib2excit.connect(p=1.0)

    # set the connections: excitatory to inhibitory NMDA connections
    syn_excit2inhib = Synapses(
        excit_pop,
        inhib_pop,
        model="s_NMDA_total_post = s_NMDA_pre : 1 (summed)",
        method="rk2")
    syn_excit2inhib.connect(p=1.0)

    # # set the connections: UNSTRUCTURED excitatory to excitatory
    # syn_excit2excit = Synapses(excit_pop, excit_pop,
    #        model= "s_NMDA_total_post = s_NMDA_pre : 1 (summed)", method="rk2")
    # syn_excit2excit.connect(condition="i!=j", p=1.)

    # set the STRUCTURED recurrent input. use a network_operation
    @network_operation()
    def update_nmda_sum():
        fft_s_NMDA = rfft(excit_pop.s_NMDA)
        fft_s_NMDA_total = numpy.multiply(fft_presyn_weight_kernel, fft_s_NMDA)
        s_NMDA_tot = irfft(fft_s_NMDA_total)
        excit_pop.s_NMDA_total_ = s_NMDA_tot
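        # (the elementwise product in Fourier space implements the circular
        # convolution of s_NMDA with the ring weight kernel in O(N log N))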

    @network_operation(dt=1 * b2.ms)
    def stimulate_network(t):
        if t >= t_stimulus_start and t < t_stimulus_end:
            # excit_pop[stim_start_i - 15:stim_start_i + 15].I_stim = 0.25 * b2.namp
            # Todo: review indexing
            # print("stim on")
            excit_pop.I_stim[stim_target_idx] = stimulus_strength
        else:
            # print("stim off")
            excit_pop.I_stim = 0. * b2.namp
        # add distractor
        if t >= t_distractor_start and t < t_distractor_end:
            excit_pop.I_stim[distr_target_idx] = distractor_strength

    def get_monitors(pop, nr_monitored, N):
        nr_monitored = min(nr_monitored, (N))
        idx_monitored_neurons = \
            [int(math.ceil(k))
             for k in numpy.linspace(0, N - 1, nr_monitored + 2)][1:-1]  # sample(range(N), nr_monitored)
        rate_monitor = PopulationRateMonitor(pop)
        # record= some_list is not supported? :-(
        spike_monitor = SpikeMonitor(pop, record=idx_monitored_neurons)
        voltage_monitor = StateMonitor(pop, "v", record=idx_monitored_neurons)
        return rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons

    # collect data of a subset of neurons:
    rate_monitor_inhib, spike_monitor_inhib, voltage_monitor_inhib, idx_monitored_neurons_inhib = \
        get_monitors(inhib_pop, monitored_subset_size, N_inhibitory)

    rate_monitor_excit, spike_monitor_excit, voltage_monitor_excit, idx_monitored_neurons_excit = \
        get_monitors(excit_pop, monitored_subset_size, N_excitatory)

    b2.run(sim_time)
    return \
        rate_monitor_excit, spike_monitor_excit, voltage_monitor_excit, idx_monitored_neurons_excit,\
        rate_monitor_inhib, spike_monitor_inhib, voltage_monitor_inhib, idx_monitored_neurons_inhib,\
        weight_profile_45
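# A minimal driver for simulate_wm (a sketch; the stimulus timing and
# sim_time values here are illustrative, not from the original):
if __name__ == "__main__":
    results = simulate_wm(t_stimulus_start=100 * b2.ms,
                          t_stimulus_duration=100 * b2.ms,
                          sim_time=500. * b2.ms)
    rate_monitor_excit = results[0]
    print("mean excitatory rate:", rate_monitor_excit.rate.mean())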
Example n. 58
from numpy import sqrt
from scipy.special import erf


def stepfun(x):
    return (1. + erf(x / sqrt(2.))) / 2.
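# stepfun is the CDF of the standard normal distribution; a quick check
# against scipy.stats (not part of the original snippet):
from scipy.stats import norm

print(stepfun(0.0), norm.cdf(0.0))    # both 0.5
print(stepfun(1.96), norm.cdf(1.96))  # both ~0.975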
Example n. 59
import numpy as np
from scipy.special import erf


def B2_erf_quad_integrand(r, Xi, T):
    # alpha(T) is defined elsewhere in the original module
    erf_mayer_function = (1.0 / 2) * (erf((r - alpha(T)) / Xi) - 1)
    return (-1.0 / 2) * (4 * np.pi) * erf_mayer_function * r * r
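# A hypothetical driver (alpha(T) and the cutoff radius live elsewhere in the
# original module; the placeholder alpha below is for illustration only):
from scipy import integrate

def alpha(T):
    return 1.0  # placeholder, NOT the original definition

Xi, T = 0.1, 1.0
B2, _ = integrate.quad(B2_erf_quad_integrand, 0.0, 10.0, args=(Xi, T))
print(B2)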
Example n. 60
import numpy as np
from pandas import DataFrame
from scipy import special
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show


def bokeh_corner_plot(dataset,
                      TOOLS=None,
                      hist_color='orange',
                      kde_color="violet"):
    if isinstance(dataset, np.ndarray):
        dataset = DataFrame(dataset)

    if TOOLS is None:
        TOOLS = "box_select,lasso_select,pan,wheel_zoom,box_zoom,reset,help"

    scatter_plots = []
    y_max = len(dataset.columns) - 1
    for i, y_col in enumerate(dataset):
        for j, x_col in enumerate(dataset):
            df = DataFrame({
                x_col: dataset[x_col].tolist(),
                y_col: dataset[y_col].tolist()
            })
            fig = figure(tools=TOOLS,
                         toolbar_location="below",
                         toolbar_sticky=False)
            if i >= j:
                if i != j:
                    fig.scatter(x=x_col, y=y_col, source=df)
                else:
                    x_now = np.sort(dataset[x_col].values)
                    mu, sigma = np.mean(x_now), np.std(x_now)
                    hist, edges = np.histogram(x_now,
                                               density=True,
                                               bins=max(10, len(x_now) // 100))  # guard against zero bins for small samples
                    pdf = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(
                        -0.5 * (x_now - mu)**2 / sigma**2)
                    cdf = 0.5 * (1 + special.erf(
                        (x_now - mu) / np.sqrt(2 * sigma**2)))

                    fig.quad(top=hist,
                             bottom=0,
                             left=edges[:-1],
                             right=edges[1:],
                             fill_color=hist_color,
                             line_color=hist_color,
                             alpha=1.0)
                    fig.line(x_now,
                             pdf,
                             line_color=kde_color,
                             line_width=8,
                             alpha=0.7)  #, legend="PDF")
                    #fig.line(x_now, cdf, line_color="black"  , line_width=2, alpha=0.5, legend="CDF")
                if j > 0:
                    fig.yaxis.axis_label = ""
                    fig.yaxis.visible = False
                if i < y_max:
                    fig.xaxis.axis_label = ""
                    fig.xaxis.visible = False
            else:
                fig.outline_line_color = None

            scatter_plots.append(fig)

    grid = gridplot(scatter_plots, ncols=len(dataset.columns))
    show(grid)
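# Illustrative usage (not part of the original): a corner plot of a
# 3-column standard-normal sample. In a notebook, call
# bokeh.io.output_notebook() first so show() renders inline.
if __name__ == "__main__":
    demo = DataFrame(np.random.randn(2000, 3), columns=["a", "b", "c"])
    bokeh_corner_plot(demo)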