Example #1
    def integrate(self, lo, up, points=None, function=None):
        """Integrate between lo and up of the values
    
    Arguments:
      lo,up (float or array): lower/upper bound of integration
      function (function or None): function applied to va;lues before integration
    
    Returns:
      float: the integral
    """
        if function is None:
            tck = self.tck()
        else:
            points = self.get_points(
                points=points,
                error=
                'points need to be specified in order to integrate a function along the spline!'
            )
            values = function(self.get_values(points=points))
            #tck = splrep(points, values, t = self._knots, task = -1, k = self.degree);
            tck = splrep(points, values, k=self.degree)

        if isinstance(lo, np.ndarray):
            if len(up) != len(lo):
                raise ValueError(
                    'lower and upper bounds expected to have same shape!')
            return np.array(
                [splint(lo[i], up[i], tck) for i in range(len(lo))])
        else:
            return splint(lo, up, tck)
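A minimal standalone sketch of the same pattern (plain numpy and scipy.interpolate only; the data are illustrative), integrating a fitted spline over several [lo, up] pairs at once:

import numpy as np
from scipy.interpolate import splrep, splint

x = np.linspace(0., 10., 101)
y = np.sin(x)
tck = splrep(x, y)                 # cubic spline representation (t, c, k)

lo = np.array([0., 1., 2.])
up = np.array([1., 2., 3.])
# one definite integral per (lo, up) pair, as integrate() does for array bounds
vals = np.array([splint(lo[i], up[i], tck) for i in range(len(lo))])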
Example #2
 def setGrid(self):
     from scipy import interpolate
     x0 = numpy.logspace(-3,1,81)
     etas = numpy.linspace(0.,2.,21)
     qs = numpy.linspace(0.2,1.,17)
     grid1 = numpy.empty((x0.size,x0.size,qs.size,etas.size))
     grid2 = numpy.empty(grid1.shape)
     for i in range(qs.size):
         q = qs[i]
         q2 = q**2
         b = 1-q2
         for j in range(etas.size):
             eta = etas[j]
             g = 0.5*eta-1. # g = -1*gamma
             for k in range(x0.size):
                 x = x0[k]
                 for l in range(x0.size):
                     y = x0[l]
                     qb = ((2*x*y)/b)**g  # q_bar
                     qt = q*(x*y)**0.5/b  # q_tilde
                     sb = 0.5*(x/y - y/x) + s**2*b/(2*x*y)
                     nu1 = s**2*b/(2*x*y)
                     nu2 = nu1+ 0.5*b*(x/y + y/(x*q2))
                     nu = numpy.logspace(nu1,nu2,1001)
                     mu = nu-sb
                     t = (1+mu**2)**0.5
                     f1 = (t-mu)**0.5/t
                     f2 = (t+mu)**0.5/t
                     ng = nu**g
                     I1 = interpolate.splrep(nu,f1*ng)
                     I2 = interpolate.splrep(nu,f2*ng)
                     grid1[k,l,i,j] = qt*interpolate.splint(nu1,nu2,I1)
                     grid2[k,l,i,j] = qt*interpolate.splint(nu1,nu2,I2)
             pylab.imshow(grid1[:,:,i,j])
             pylab.show()
Example #3
def rho(z0, rhopar, pop, gp):
    vec = rhopar
    rho_at_rhalf = vec[0]
    vec = vec[1:]
    # get spline representation on gp.xepol, where rhopar are defined on
    spline_n = nr(gp.xepol, vec, pop, gp)

    # and apply it to these radii, which may be anything in between
    zs =  np.log(z0/gp.Xscale[pop]) # have to integrate in d log(r)
    logrright = []; logrleft = []
    if np.ndim(zs) == 0:
        if zs>0:
            logrright.append(zs)
        else:
            logrleft.append(zs)
    else:
        logrright = zs[(zs>=0.)]
        logrleft  = zs[(zs<0.)]
        logrleft  = logrleft[::-1] # inverse order

    # integrate to left and right of halflight radius
    logrhoright = []
    for i in np.arange(0, len(logrright)):
        logrhoright.append(np.log(rho_at_rhalf) + \
                           splint(0., logrright[i], spline_n))
                           # integration along dlog(r) instead of dr

    logrholeft = []
    for i in np.arange(0, len(logrleft)):
        logrholeft.append(np.log(rho_at_rhalf) + \
                          splint(0., logrleft[i], spline_n))

    tmp = np.exp(np.hstack([logrholeft[::-1], logrhoright])) # still defined on log(r)
    gh.checkpositive(tmp, 'rho()')
    return tmp
Example #4
def energy_compute(abscissa, function):
    hbar = 1
    omega = 1
    m = 1
    a = -5.
    b = 5.

    #interpolations
    tck_true = interpolate.splrep(abscissa, function, k=3, s=0)  #W.F.
    tck_true_carre = interpolate.splrep(abscissa,
                                        function * function,
                                        k=3,
                                        s=0)  #W.F. squared
    tck_true_x = interpolate.splrep(abscissa,
                                    abscissa * abscissa * function * function,
                                    k=3,
                                    s=0)  #W.F. squared*x^2
    der_true = interpolate.splev(abscissa, tck_true, der=1)  #W.F. derivative
    tck_true_der = interpolate.splrep(abscissa, der_true * der_true, k=3,
                                      s=0)  #W.F. derivative spline 1000
    int_true_carre = interpolate.splint(
        a, b, tck_true_carre)  #integral of W.F. squared
    int_true_x = interpolate.splint(
        a, b, tck_true_x)  #integral of W.F. squared*x^2 (<x^2>)
    int_true_der = interpolate.splint(
        a, b, tck_true_der)  #integral of derivative squared
    #energy
    Energy = ((-pow(hbar, 2) / (2 * m)) *
              (function[-1] * der_true[-1] - function[0] * der_true[0] -
               int_true_der) + 0.5 * m * omega * int_true_x) / int_true_carre
    return Energy
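A hedged usage sketch: for the harmonic-oscillator ground state psi ~ exp(-x**2/2) (with hbar = m = omega = 1, as hard-coded above), the returned energy should come out close to 0.5:

import numpy as np
from scipy import interpolate   # assumed module-level import of the excerpt

x = np.linspace(-5., 5., 1001)
psi = np.exp(-0.5 * x**2)
print(energy_compute(x, psi))   # ~0.5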
Example #5
def radialConvolve(r, f, sigma, fk=100, fr=1):
    from scipy.special import j0
    import special_functions as sf
    #mod = splrep(r,f,s=0,k=1)
    #norm = splint(r[0],r[-1],mod)
    r0 = r.copy()
    f0 = f.copy()
    r = r / sigma
    sigma = 1.

    kmin = numpy.log10(r.max()) * -1
    kmax = numpy.log10(r.min()) * -1
    k = numpy.logspace(kmin, kmax, r.size * fr)
    a = k * 0.
    for i in range(k.size):
        bessel = j0(r * k[i])
        A = splrep(r, r * bessel * f, s=0, k=1)
        a[i] = splint(0., r[-1], A)

    a0 = (2. * pi * sigma**2)**-0.5
    b = a0 * sigma * numpy.exp(-0.5 * k**2 * sigma**2)

    ab = a * b
    mod = splrep(k, ab, s=0, k=1)
    k = numpy.logspace(kmin, kmax, r.size * fk)
    ab = splev(k, mod)
    result = r * 0.
    for i in range(r.size):
        bessel = j0(k * r[i])
        mod = splrep(k, k * bessel * ab, s=0, k=1)
        result[i] = 2 * pi * splint(0., k[-1], mod)

    return result
Example #6
def VegaFilterMagnitude(filter,spectrum,redshift):
    """
    Determines the Vega magnitude (up to a constant) given an input filter,
        SED, and redshift.
    """
    from scipy.interpolate import splev,splint,splrep
    from scipy.integrate import simps
    from math import log10

    wave = spectrum[0].copy()
    data = spectrum[1].copy()

    # Redshift the spectrum and determine the valid range of wavelengths
    wave *= (1.+redshift)
    data /= (1.+redshift)
    wmin,wmax = filter[0][0],filter[0][-1]
    cond = (wave>=wmin)&(wave<=wmax)

    # Evaluate the filter at the wavelengths of the spectrum
    response = splev(wave[cond],filter)

    # Determine the total observed flux (without the bandpass correction)
    observed = splrep(wave[cond],(response*data[cond]),s=0,k=1)
    flux = splint(wmin,wmax,observed)

    # Determine the magnitude of Vega through the filter
    vwave,vdata = getSED('Vega')
    cond = (vwave>=wmin)&(vwave<=wmax)
    response = splev(vwave[cond],filter)
    vega = splrep(vwave[cond],response*vdata[cond],s=0,k=1)
    vegacorr = splint(wmin,wmax,vega)

    return -2.5*log10(flux/vegacorr)#+2.5*log10(1.+redshift)
Example #7
def VegaFilterMagnitude(filter, spectrum, redshift):
    """
    Determines the Vega magnitude (up to a constant) given an input filter,
        SED, and redshift.
    """
    from scipy.interpolate import splev, splint, splrep
    from scipy.integrate import simps
    from math import log10

    wave = spectrum[0].copy()
    data = spectrum[1].copy()

    # Redshift the spectrum and determine the valid range of wavelengths
    wave *= (1. + redshift)
    data /= (1. + redshift)
    wmin, wmax = filter[0][0], filter[0][-1]
    cond = (wave >= wmin) & (wave <= wmax)

    # Evaluate the filter at the wavelengths of the spectrum
    response = splev(wave[cond], filter)

    # Determine the total observed flux (without the bandpass correction)
    observed = splrep(wave[cond], (response * data[cond]), s=0, k=1)
    flux = splint(wmin, wmax, observed)

    # Determine the magnitude of Vega through the filter
    vwave, vdata = getSED('Vega')
    cond = (vwave >= wmin) & (vwave <= wmax)
    response = splev(vwave[cond], filter)
    vega = splrep(vwave[cond], response * vdata[cond], s=0, k=1)
    vegacorr = splint(wmin, wmax, vega)

    return -2.5 * log10(flux / vegacorr)  #+2.5*log10(1.+redshift)
Example #8
    def scale(self, band2, band1, madau=True):
        zs = self.zs

        a2 = self.tn % 1
        a1 = 1. - a2
        n1 = int(np.floor(self.tn))
        n2 = int(np.ceil(self.tn))

        bands_here = [band2, band1]
        temp_coeffs = [a1, a2]
        temp_ind = [n1, n2]

        temp_fband = []
        for n in temp_ind:

            flambda = self.templates[n][1].copy()
            wav = self.templates[n][0].copy()

            wobs = wav * (1. + zs)

            fband_here = []

            for band in bands_here:
                wrange = (wobs > self.filtranges[band][0]) & (
                    wobs < self.filtranges[band][1])
                wband = wobs[wrange]
                flambda_here = flambda[wrange]

                if madau:
                    madau_corr = etau_madau(wband, zs)
                    flambda_here *= madau_corr

                nu_here = 1. / wband
                fnu_here = flambda_here / nu_here**2
                weights = splev(wband, self.filtsplines[band])

                nu_here = nu_here[::-1]
                fnu_here = fnu_here[::-1]
                weights = weights[::-1]

                num_integrand = splrep(nu_here,
                                       weights * fnu_here / nu_here,
                                       k=1)
                den_integrand = splrep(nu_here, weights / nu_here, k=1)

                num = splint(nu_here[0], nu_here[-1], num_integrand)
                den = splint(nu_here[0], nu_here[-1], den_integrand)

                fband_here.append(num / den)

            temp_fband.append(fband_here)

        ratio = (a1 * temp_fband[0][0] + a2 * temp_fband[1][0]) / (
            a1 * temp_fband[0][1] + a2 * temp_fband[1][1])

        return ratio * 10.**((self.zp[band2] - self.zp[band1]) / 2.5)
Example #9
 def _integrate_spl_scalar(self, ui, spl):
     if self.active == 'R':  # passive ---- active
         return -interpolate.splint(self._t_active, ui, spl)
     elif self.active == 'L':  # active ---- passive
         return interpolate.splint(self._t_active, ui, spl)
     else:  # active ---- active
         if ui <= self._t_passive:
             return interpolate.splint(self._t_active[0], ui, spl)
         else:
             return -interpolate.splint(self._t_active[1], ui, spl)
Example #10
    def test_x2_surrounds_x1_sine_spline(self):
        """
        x2 range is completely above x1 range
        using a random vector to build spline
        """
        # old size
        m = 5

        # new size
        n = 6

        # bin edges
        x_old = np.linspace(0., 1., m + 1)
        x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])

        subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6,
                            0.73])

        y_old = 1. + np.sin(x_old[:-1] * np.pi)

        # compute spline ----------------------------------
        x_mids = x_old[:-1] + 0.5 * np.ediff1d(x_old)
        xx = np.hstack([x_old[0], x_mids, x_old[-1]])
        yy = np.hstack([y_old[0], y_old, y_old[-1]])

        # build spline
        spl = splrep(xx, yy)

        area_old = np.array([splint(x_old[i], x_old[i + 1], spl)
                             for i in range(m)])

        # computing subbin areas
        area_subbins = np.zeros((subbins.size - 1,))
        for i in range(area_subbins.size):
            a, b = subbins[i: i + 2]
            a = max([a, x_old[0]])
            b = min([b, x_old[-1]])
            if b > a:
                area_subbins[i] = splint(a, b, spl)

        # summing subbin contributions in y_new_ref
        y_new_ref = np.zeros((x_new.size - 1,))
        y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
        y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
        y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
        y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]

        y_new_ref[5]  = y_old[1] * area_subbins[6] / area_old[1]
        y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
        y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]

        # call rebin function
        y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

        assert_allclose(y_new, y_new_ref)
Example #11
def test_x2_surrounds_x1_sine_spline():
    """
    x2 range is completely above x1 range
    using a random vector to build spline
    """
    # old size
    m = 5

    # new size
    n = 6

    # bin edges
    x_old = np.linspace(0., 1., m + 1)
    x_new = np.array([-.3, -.09, 0.11, 0.14, 0.2, 0.28, 0.73])

    subbins = np.array([-.3, -.09, 0., 0.11, 0.14, 0.2, 0.28, 0.4, 0.6, 0.73])

    y_old = 1. + np.sin(x_old[:-1] * np.pi)

    # compute spline ----------------------------------
    x_mids = x_old[:-1] + 0.5 * np.ediff1d(x_old)
    xx = np.hstack([x_old[0], x_mids, x_old[-1]])
    yy = np.hstack([y_old[0], y_old, y_old[-1]])

    # build spline
    spl = splrep(xx, yy)

    area_old = np.array(
        [splint(x_old[i], x_old[i + 1], spl) for i in range(m)])

    # computing subbin areas
    area_subbins = np.zeros((subbins.size - 1, ))
    for i in range(area_subbins.size):
        a, b = subbins[i:i + 2]
        a = max([a, x_old[0]])
        b = min([b, x_old[-1]])
        if b > a:
            area_subbins[i] = splint(a, b, spl)

    # summing subbin contributions in y_new_ref
    y_new_ref = np.zeros((x_new.size - 1, ))
    y_new_ref[1] = y_old[0] * area_subbins[2] / area_old[0]
    y_new_ref[2] = y_old[0] * area_subbins[3] / area_old[0]
    y_new_ref[3] = y_old[0] * area_subbins[4] / area_old[0]
    y_new_ref[4] = y_old[1] * area_subbins[5] / area_old[1]

    y_new_ref[5] = y_old[1] * area_subbins[6] / area_old[1]
    y_new_ref[5] += y_old[2] * area_subbins[7] / area_old[2]
    y_new_ref[5] += y_old[3] * area_subbins[8] / area_old[3]

    # call rebin function
    y_new = rebin.rebin(x_old, y_old, x_new, interp_kind=3)

    assert_allclose(y_new, y_new_ref)
Example #12
def rebin(x, y, xnew, conserve_count=True):
    dx = get_dx(x)
    dxnew = get_dx(xnew)
    if conserve_count:  # count conserved (input is per pix)
        spl = splrep(x, y / dx, k=1, task=0, s=0)
        return np.array([splint(xn-0.5*dxn,xn+0.5*dxn,spl) \
          for xn,dxn in zip(xnew,dxnew)])
    else:  #flux conserved (input is in physical unit)
        spl = splrep(x, y, k=1, task=0, s=0)
        return np.array([splint(xn-0.5*dxn,xn+0.5*dxn,spl)/dxn \
          for xn,dxn in zip(xnew,dxnew)])
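get_dx is not shown in this excerpt; a hedged stand-in (local bin width via np.gradient, a guess at the original helper) plus a toy call:

import numpy as np
from scipy.interpolate import splrep, splint

def get_dx(x):
    # hypothetical replacement for the missing helper: local sample spacing
    return np.gradient(x)

x = np.linspace(0., 10., 51)
y = np.exp(-0.5 * (x - 5.)**2)      # counts per pixel
xnew = np.linspace(0.5, 9.5, 31)
ynew = rebin(x, y, xnew)            # counts redistributed onto the new grid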
Example #13
def GP_int(rec):
    z,g,sig=rec
    n=len(z)
    tck = splrep(z,g)
    gint=np.zeros(n)
    for i in range(n):
        gint[i]=splint(0,z[i],tck)
    tck_s=splrep(z, g+sig)
    gint_s=np.zeros(n)
    for i in range(n):
        gint_s[i]=splint(0,z[i],tck_s)
    return z,gint,gint_s-gint
Example #14
def kappa(r0fine, Mrfine, nufine, sigr2nu, intbetasfine, gp):
    # for the following: enabled calculation of kappa

    # kappa_r^4
    kapr4nu = np.ones(len(r0fine)-gp.nexp)
    xint  = r0fine                  # [pc]
    yint  = gu.G1__pcMsun_1km2s_2 * Mrfine/r0fine**2  # [1/pc (km/s)^2]
    yint *= nufine                  # [Munit/pc^4 (km/s)^2]
    yint *= sigr2nu               # [Munit/pc^4 (km/s)^4]
    yint *= np.exp(intbetasfine)          # [Munit/pc^4 (km/s)^4]
    gh.checkpositive(yint, 'yint in kappa_r^4')
    yscale = 10.**(1.-min(np.log10(yint[1:])))
    yint *= yscale
    # power-law extrapolation to infinity
    C = max(0., gh.quadinflog(xint[-3:], yint[-3:], r0fine[-1], gp.rinfty*r0fine[-1]))

    splpar_nu = splrep(xint, yint, k=3) # interpolation in real space
    for k in range(len(r0fine)-gp.nexp):
        # integrate from minimal radius to infinity
        kapr4nu[k] = 3.*(np.exp(-intbetasfine[k])/nufine[k]) * \
            (splint(r0fine[k], r0fine[-1], splpar_nu) + C) # [(km/s)^4]

    kapr4nu /= yscale
    gh.checkpositive(kapr4nu, 'kapr4nu in kappa_r^4')

    splpar_kap = splrep(r0fine[:-gp.nexp], np.log(kapr4nu), k=3)
    kapr4ext = np.exp(splev(r0ext, splpar_kap))
    kapr4nu = np.hstack([kapr4nu, kapr4ext])
    gh.checkpositive(kapr4nu, 'kapr4nu in extended kappa_r^4')

    dbetafinedr = splev(r0fine, splrep(r0fine, betafine), der=1)
    gh.checknan(dbetafinedr, 'dbetafinedr in kappa_r^4')

    # kappa^4_los*surfdensity
    kapl4s = np.zeros(len(r0fine)-gp.nexp)
    for k in range(len(r0fine)-gp.nexp):
        xnew = np.sqrt(r0fine[k:]**2-r0fine[k]**2)      # [pc]
        ynew = g(r0fine[k:], r0fine[k], betafine[k:], dbetafinedr[k:]) # [1]
        ynew *= nufine[k:] * kapr4nu[k:]
        C = max(0., gh.quadinflog(xnew[-3:], ynew[-3:], xnew[-1], gp.rinfty*xnew[-1]))
        splpar_nu = splrep(xnew,ynew) # not s=0.1, this sometimes gives negative entries after int
        kapl4s[k] = 2. * (splint(0., xnew[-1], splpar_nu) + C)
        #kapl4s[k] /= yscale
        # LOG('ynew = ',ynew,', kapl4s =', kapl4s[k])

    gh.checkpositive(kapl4s, 'kapl4s in kappa_r^4')

    # project kappa4_los as well
    # only use middle values to approximate, without errors in center and far
    kapl4s_out = np.exp(splev(r0, splrep(r0fine[4:-gp.nexp], kapl4s[4:], k=3))) # s=0.
    gh.checkpositive(kapl4s_out, 'kapl4s_out in kappa_r^4')
    return sigl2s_out, kapl4s_out
Example #15
 def update(self, x, dt):
     self.curr_time += dt
     self.samples.append(x)
     self.time.append(self.curr_time)
     if len(self.samples) > 4:
         self.samples = self.samples[1:]
         self.time = self.time[1:]
         tck = interpolate.splrep(self.time,self.samples)
         if self.warmup:
             self.integral += interpolate.splint(self.time[-2],self.time[-1],tck)
         else:
             self.integral += interpolate.splint(self.time[0],self.time[-1],tck)
             self.warmup = True
Example #16
def get_mag_from_sed(wave,
                     llambda,
                     redshift,
                     filtname,
                     cosmo=pygalaxev_cosmology.default_cosmo):

    filtdir = os.environ.get('PYGALAXEVDIR') + '/filters/'
    Dlum = pygalaxev_cosmology.Dlum(redshift,
                                    cosmo=cosmo)  # luminosity distance in Mpc

    wave_obs = wave * (1. + redshift)
    flambda_obs = llambda * L_Sun / (4. * np.pi * (Dlum * Mpc)**2) / (
        1. + redshift)  # observed specific flux in erg/s/cm^2/AA
    fnu = flambda_obs * wave_obs**2 / csol * 1e-8  # F_nu in cgs units

    nu_obs = np.flipud(csol / wave_obs * 1e8)
    fnu = np.flipud(fnu)

    fullfiltname = filtdir + filtname

    f = open(fullfiltname, 'r')
    filt_wave, filt_t = np.loadtxt(f, unpack=True)
    f.close()

    filt_spline = splrep(filt_wave, filt_t)

    wmin_filt, wmax_filt = filt_wave[0], filt_wave[-1]
    cond_filt = (wave_obs >= wmin_filt) & (wave_obs <= wmax_filt)
    nu_cond = np.flipud(cond_filt)

    # Evaluate the filter response at the wavelengths of the spectrum
    response = splev(wave_obs[cond_filt], filt_spline)
    nu_filter = csol * 1e8 / wave_obs[cond_filt]

    # flips arrays
    response = np.flipud(response)
    nu_filter = np.flipud(nu_filter)

    # filter normalization
    bp = splrep(nu_filter, response / nu_filter, s=0, k=1)
    bandpass = splint(nu_filter[0], nu_filter[-1], bp)

    # Integrate
    observed = splrep(nu_filter, response * fnu[nu_cond] / nu_filter, s=0, k=1)
    flux = splint(nu_filter[0], nu_filter[-1], observed)

    mag = -2.5 * np.log10(flux / bandpass) - 48.6

    return mag
Example #17
    def integrate(self, a=0., b=2*np.pi):
        """ Find the definite integral of the spline from a to b """

        # Are both a and b in (0, 2pi)?
        if (0 <= a <= 2*np.pi) and (0 <= b <= 2*np.pi):
            return splint(a, b, self._eval_args)
        elif ((a <= 0) and (b <= 0)) or ((a >= 2*np.pi) 
                                         and (b >= 2*np.pi)):
            return splint(a%(2*np.pi), b%(2*np.pi), self._eval_args)

        elif (a <= 0) or (b >= 2*np.pi):
            total = 0.
            total += splint(a%(2*np.pi), 2*np.pi, self._eval_args)
            total += splint(0, b%(2*np.pi), self._eval_args)
            return total
Example #18
def integrate_smooth(accel, dt=1e-2, init_motion=None):
    if init_motion is None:
        init_motion = np.zeros((3, 3))
    time = (np.arange(accel.shape[1], dtype=np.float64)) * dt
    vel = np.zeros_like(accel)
    pose = np.zeros_like(accel)
    for i in range(3):
        tck = interpolate.splrep(time, accel[i], s=0, k=1)
        for j in range(accel[i].shape[0]):
            vel[i, j] = interpolate.splint(0, time[j], tck)
        vel[i] += init_motion[1, i]
        tck = interpolate.splrep(time, vel[i], s=0, k=1)
        for j in range(vel[i].shape[0]):
            pose[i, j] = interpolate.splint(0, time[j], tck)
        pose[i] += init_motion[2, i]
    return accel, vel, pose
Example #19
def ObjectiveFunction(sigma, knots, ts, ois_rate, lib_rate):
	"""
	constructs the B-spline for the given knots, times (in years),
	OIS rates, and LIBOR rates for the corresponding times. Calculates
	the objective function for the given parameters.
	
	
	sigma = float (lambda from equation (40) on page 23 of lecture 1 notes)
	knots = numpy array
	ts = numpy array
	ois_rate = numpy array
	lib_rate = numpy array
	"""
		
	tck1 = interp.splrep(ts, ois_rate, t = knots)
	tck2 = interp.splrep(ts, lib_rate, t = knots)
	
	approx1 = interp.splev(ts,tck1)
	approx2 = interp.splev(ts,tck2)
	
	partial_sum = ((approx1 - ois_rate)**2).sum()
	partial_sum += (sum((approx2 - lib_rate)**2)).sum()
	partial_sum *= 0.5
		
	new_tck = []
	new_tck.append(knots)
	new_tck.append((interp.splev(ts,tck1,der=2))**2 + (interp.splev(ts, tck2, der=2)**2))
	new_tck.append(3)
	
	temp = interp.splint(ts[0], ts[-1], new_tck)
		
	partial_sum += 0.5*sigma*temp
	
	return partial_sum
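In effect, the quantity assembled above is a penalized least-squares objective (writing f_1, f_2 for the splines fitted to the OIS and LIBOR rates, and sigma for the smoothness weight):

F = \frac{1}{2}\sum_i \Big[\big(f_1(t_i)-y^{\mathrm{OIS}}_i\big)^2 + \big(f_2(t_i)-y^{\mathrm{LIBOR}}_i\big)^2\Big] + \frac{\sigma}{2}\int_{t_1}^{t_N}\Big[f_1''(t)^2 + f_2''(t)^2\Big]\,dt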
Example #20
def cont_kldivergence(data, prior):
    """
    Calculates the Kullback–Leibler divergence for a continuous distribution.

    data: samples from the posterior distribution
    prior: pdf of the prior distribution
    """
    x, p = np.histogram(data, bins='auto')
    x = np.array(x)/np.sum(x)
    h = len(x)
    item = 0
    zeroitem = False
    x1 = []
    p1 = []
    for i in range(h):
        if x[i] > 0 and not zeroitem:
            x1.append(x[i] / (p[1] - p[0]))
            p1.append(np.mean((p[i], p[i + 1])))
            item += 1
        if x[i] == 0 and not zeroitem:
            zeroitem = True
            initial = i
        if x[i] > 0 and zeroitem:
            zeroitem = False
            x1.append(x[i] / (p[i + 1] - p[initial]))
            p1.append(np.mean(p[initial:i + 1]))
            item += 1
    x1 = np.array(x1)
    q1 = prior(p1)
    x2 = x1*np.log(x1/q1)
    tck = interpolate.splrep(p1, x2)
    return interpolate.splint(min(p1), max(p1), tck)
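For reference, the quantity being approximated is the continuous Kullback–Leibler divergence, with p(x) estimated from the normalized histogram and q(x) given by the prior pdf:

D_{\mathrm{KL}}(P\,\|\,Q) = \int p(x)\,\ln\frac{p(x)}{q(x)}\,dx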
Example #21
def test_splint():
    """
    Evaluate the definite integral of a B-spline.
    Given the knots and coefficients of a B-spline, evaluate the definite
    integral of the smoothing polynomial between two given points.
    Parameters
    ----------
    a, b : float
        The end-points of the integration interval.
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline (see `splev`).
    full_output : int, optional
        Non-zero to return optional output.
    Returns
    -------
    integral : float
        The resulting integral.
    wrk : ndarray
        An array containing the integrals of the normalized B-splines
        defined on the set of knots.
    """
    x = linspace(0,10,10)
    y = sin(x)
    tck = splrep(x, y)
    y2 = splint(x[0],x[-1],tck)
    print(y2)
Example #22
 def GetSourceSize(self,kpc=False):
     self.z=source_redshifts[self.name]
     self.Da = astCalc.da(self.z)
     self.scale = self.Da*1e3*np.pi/180./3600.
     if len(self.srcs) == 1 or self.name == 'J0837':
         self.Re_v = self.Ddic['Source 1 re']*0.05
         self.Re_i = self.Re_v.copy()
         self.Re_lower = self.Ldic['Source 1 re']*0.05
         self.Re_upper = self.Udic['Source 1 re']*0.05
     elif len(self.srcs) == 2 and self.name != 'J0837':
         print('test this out...!')
         Xgrid = np.logspace(-4,5,1501)
         Res = []
         for i in range(len(self.imgs)):
             #if self.name == 'J1605':
             #    source = 
             source = self.fits[i][-3]*self.srcs[0].eval(Xgrid) + self.fits[i][-2]*self.srcs[1].eval(Xgrid)
             R = Xgrid.copy()
             light = source*2.*np.pi*R
             mod = splrep(R,light,t=np.logspace(-3.8,4.8,1301))
             intlight = np.zeros(len(R))
             for i in range(len(R)):
                 intlight[i] = splint(0,R[i],mod)
             model = splrep(intlight[:-300],R[:-300])
             
             if np.isnan(model[1]).any():
                 print("arrays need to be increasing monotonically! But don't worry about it")
                 model = splrep(intlight[:-450],R[:-450])
             reff = splev(0.5*intlight[-1],model)
             Res.append(reff*0.05)
         self.Re_v,self.Re_i = Res
     if kpc:
         return [self.Re_v*self.scale, self.Re_i*self.scale]
     return [self.Re_v, self.Re_i]
Example #23
    def get_alpha(self):
        """
        Calculates self.alpha - the likelihood that a point is
        part of an ELM peak based on its intensity value.
        """
        # create a series of times across the data which serve as the centre
        # of time windows with half-width self.w
        t_c = np.linspace(np.min(self.t), np.max(self.t), self.m)
        u = np.zeros(self.m)
        for i in range(self.m):
            # find all data in the current time-window
            booles = (self.t > t_c[i] - self.w) & (self.t < t_c[i] + self.w)
            # sort the y-data in the window
            order = np.sort(self.y[np.where(booles)])
            # estimate the self.rho'th percentile for the window
            u[i] = order[np.round(self.rho * len(order)).astype(int)]

        # smoothing of threshold level data
        u = self.smooth(u)

        # set threshold at ends of the data
        dt_c = t_c[1] - t_c[0]
        ind = np.round(self.w / dt_c).astype(int)
        u[:ind] = u[ind]
        u[-ind:] = u[-ind]

        # use a spline to estimate the threshold level at each sample time
        spline = splrep(t_c, u)
        self.tau = splev(self.t, spline)
        self.alpha = np.array(self.y >= self.tau)
Example #24
def printIsigma2OM(R, r, M, light_profile, lp_args, riso, infile):
    from numpy import arctan
    if type(R) == type(1.):
        R = numpy.asarray([R])
    light = light_profile(r, lp_args)
    a = lp_args

    model = interpolate.splrep(r, M * light, k=3, s=0)
    result = R * 0.
    ua2 = (riso / R)**2
    t1 = (ua2 + 0.5) / (ua2 + 1.)**1.5
    for i in range(R.size):
        outfile = infile + '_%03d.txt' % (i)
        reval = logspace(log10(R[i]), log10(r[-1]), 301)
        reval[0] = R[i]  # Avoid sqrt(-epsilon)
        Mlight = interpolate.splev(reval, model)
        u = reval / R[i]
        u2 = u * u
        ua = ua2[i]
        K = t1[i] * (u2 + ua) * arctan(
            ((u2 - 1.) /
             (ua + 1.))**0.5) / u - 0.5 * (1. - 1. / u2)**0.5 / (ua + 1.)
        integrand = K * Mlight / reval
        mod = interpolate.splrep(reval, K * Mlight / reval, k=3, s=0)
        result[i] = 2. * interpolate.splint(R[i], reval[-1], mod)

        f = open(outfile, 'w')
        for (ip, jp) in zip(reval, integrand):
            f.write("%f\t%.10g\n" % (ip, jp))
        f.close()
    return result
Example #25
 def GetSourceSize(self,z):
     self.z=z
     self.Da = astCalc.da(self.z)
     self.scale = self.Da*np.pi/180./3600.
     if len(self.srcs) == 1:
         self.Re = self.Ddic['Source 1 re']*0.05
         self.Re_lower = self.Ldic['Source 1 re']*0.05
         self.Re_upper = self.Udic['Source 1 re']*0.05
         self.Re_kpc = self.Re*self.scale
         return self.Re
     elif len(self.srcs) == 2:
         print('test this out...!')
         Xgrid = np.logspace(-4,5,1501)
         Ygrid = np.logspace(-4,5,1501)
         Res = []
         for i in range(len(self.imgs)):
             source = self.fits[i][-3]*self.srcs[0].eval(Xgrid) + self.fits[i][-2]*self.srcs[1].eval(Xgrid)
             R = Xgrid.copy()
             light = source*2.*np.pi*R
             mod = splrep(R,light,t=np.logspace(-3.8,4.8,1301))
             intlight = np.zeros(len(R))
             for i in range(len(R)):
                 intlight[i] = splint(0,R[i],mod)
             model = splrep(intlight[:-300],R[:-300])
             reff = splev(0.5*intlight[-1],model)
             Res.append(reff*0.05)
         self.Re_v,self.Re_i = Res
         return self.Re_v, self.Re_i
Example #26
def spline_integral(x, y, x1=None, x2=None):
    if x1 is None:  # Integration limits
        x1 = x.min()
    if x2 is None:
        x2 = x.max()
    tck = interpolate.splrep(x, y, s=0)  # Spline representation
    return interpolate.splint(x1, x2, tck)  # Integral
Example #27
def calc_tissue_tac(input_tac, mtt, bv, t, lag=0):
    """
    Calculate Time/Attenuation Curve (TAC) of tissue from input TAC smoothed with spline


    Args:
      input_tac (tuple): spline representation (tck) of the input TAC, passed as the tck argument of scipy.interpolate.splint
      mtt (float): mean transit time of tissue in seconds
      bv (float): tissue blood volume. Should be between 0 and 1
      t (np.array): time steps of output TAC
      lag (float): time the input TAC takes to reach the tissue

    Returns:
      (np.array): tissue TAC with in defined time steps
    """
    if not 0 <= bv <= 1:
        raise ValueError('bv should be in interval from 0 to 1')
    if mtt == 0:
        mtt += 0.01

    t2 = t - lag
    t2[t2 < t[0]] = t[0]
    from_t = t2 - mtt
    from_t[from_t < t2[0]] = t2[0]
    final_arr = np.array([interpolate.splint(ft, tt, input_tac) for ft, tt in zip(from_t, t2)])
    return (final_arr * bv) / mtt
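A hedged usage sketch (the toy arterial input curve below is purely illustrative):

import numpy as np
from scipy import interpolate

t = np.linspace(0., 60., 121)                    # seconds
aif = t * np.exp(-t / 8.)                        # toy input TAC
input_tac = interpolate.splrep(t, aif, s=0)      # the tck tuple expected above
tissue = calc_tissue_tac(input_tac, mtt=6.0, bv=0.05, t=t, lag=2.0)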
Example #28
def Isigma2TPE(R,r,M,light_profile,lp_args,anis_par):
    from scipy.special import gamma,betainc,beta as B
    from scipy.integrate import quad
    if type(R)==type(1.):
        R = numpy.asarray([R])
    light = light_profile(r,lp_args)
    a = lp_args
    if type(anis_par) == tuple:
        ra, bi, bo = anis_par
    model = splrep(r,M*light,k=3,s=0)
    result = R*0.
    eps = 1e-6
    for i in range(R.size):
        reval = numpy.logspace(numpy.log10(R[i]),numpy.log10(r[-1]),301)
        reval[0] = R[i] # Avoid sqrt(-epsilon)
        Mlight = splev(reval,model)
        u = reval/R[i]
        ua = ra/R[i]
        FT1 = numpy.zeros(reval.size)
        for j in range(reval.size):
            FT1[j] = quad(FT,1.+eps,u[j],(ua,bi,bo))[0]
        f = u**(2*bi)*(u*u+ua*ua)**(bo-bi)
        K = 2.*FT1/(u)*f

        y = K*Mlight/reval
        mod = splrep(reval,y,k=3,s=0)
        result[i] = splint(R[i],r[-1],mod)
    return result
Example #29
def calc_tissue_tac(input_tac, mtt, bv, t, lag=0):
    """
    Calculate Time/Attenuation Curve (TAC) of tissue from input TAC smoothed with spline


    Args:
      input_tac (tuple): spline representation (tck) of the input TAC, passed as the tck argument of scipy.interpolate.splint
      mtt (float): mean transit time of tissue in seconds
      bv (float): tissue blood volume. Should be between 0 and 1
      t (np.array): time steps of output TAC
      lag (float): time the input TAC takes to reach the tissue

    Returns:
      (np.array): tissue TAC with in defined time steps
    """
    if not 0 <= bv <= 1:
        raise ValueError('bv should be in interval from 0 to 1')
    if mtt == 0:
        mtt += 0.01

    t2 = t - lag
    t2[t2 < t[0]] = t[0]
    from_t = t2 - mtt
    from_t[from_t < t2[0]] = t2[0]
    final_arr = np.array(
        [interpolate.splint(ft, tt, input_tac) for ft, tt in zip(from_t, t2)])
    return (final_arr * bv) / mtt
Example #30
def ObjectiveFunction(sigma, knots, ts, ois_rate, lib_rate):
    """
	constructs the B-spline for the given knots, times (in years),
	OIS rates, and LIBOR rates for the corresponding times. Calculates
	the objective function for the given parameters.
	
	
	sigma = float (lambda from equation (40) on page 23 of lecture 1 notes)
	knots = numpy array
	ts = numpy array
	ois_rate = numpy array
	lib_rate = numpy array
	"""

    tck1 = interp.splrep(ts, ois_rate, t=knots)
    tck2 = interp.splrep(ts, lib_rate, t=knots)

    approx1 = interp.splev(ts, tck1)
    approx2 = interp.splev(ts, tck2)

    partial_sum = ((approx1 - ois_rate)**2).sum()
    partial_sum += (sum((approx2 - lib_rate)**2)).sum()
    partial_sum *= 0.5

    new_tck = []
    new_tck.append(knots)
    new_tck.append((interp.splev(ts, tck1, der=2))**2 +
                   (interp.splev(ts, tck2, der=2)**2))
    new_tck.append(3)

    temp = interp.splint(ts[0], ts[-1], new_tck)

    partial_sum += 0.5 * sigma * temp

    return partial_sum
Example #31
def cumulativeCurr(file_name):
  
  input_file = open(file_name,'r')
  index = -1
  all_time = []
  all_current = []
  Q = 0.0
  input_lines = input_file.readlines()
  input_file.close()
  for input_line in input_lines:
    index += 1
    tmp = input_line.split()
    t = float(tmp[0])
    I = float(tmp[1])
    all_time.append(t)
    all_current.append(I)
  
  
  sall_time = asarray(all_time)
  sall_current = asarray(all_current)
  
  
  
  
  
  splrepint = interpolate.splrep(sall_time, sall_current, s=0)
  
  for time in all_time:
    charge = interpolate.splint(sall_time[0], time, splrepint)
    print(time, charge)
Example #32
 def pixeval(self,x,y):
     from numpy import cosh
     from math import pi
     from itertools import product
     from scipy.interpolate import splrep, splev, splint
     cos = numpy.cos(self.pa*pi/180.)
     sin = numpy.sin(self.pa*pi/180.)
     xp = (x-self.x)*cos+(y-self.y)*sin
     yp = (y-self.y)*cos-(x-self.x)*sin
     zp = numpy.logspace(-2,3,200)
     zp = numpy.concatenate((-zp[::-1],zp))
     array = numpy.zeros(xp.shape)
      print(len(xp[0]), len(xp[1]))
     for ii,j in product(range(len(xp[0])),range(len(xp[1]))):
         #print ii,j
         X = xp[ii,j]
         Y = yp[ii,j]*numpy.cos(self.i) + zp*numpy.sin(self.i)
         Z = -yp[ii,j]*numpy.sin(self.i) + zp*numpy.cos(self.i)
         rho = numpy.exp(-(X**2. + Y**2.)**0.5 / self.x0) /cosh(Z/self.y0)**2.
         mod = splrep(zp,rho)
         array[ii,j] = splint(zp[0],zp[-1],mod)
     import pylab as pl
     pl.figure()
     pl.plot(Z,rho)
     return array
Example #33
 def integ(x, tck, constant=0):
     x = np.atleast_1d(x)
     out = np.zeros(x.shape[0], dtype=x.dtype)
     for n in xrange(len(out)):
         out[n] = interpolate.splint(0, x[n], tck)
 #    out += constant
     return out
Example #34
def spline_integrate_real(t, y, a, b):
    """Computes the definite integral between bounds [a,b] of a function
  represented by samples contained in arrays t and y. t needs to 
  be strictly increasing.
  
  :param t:     Sample times.
  :type t:      1D numpy array.
  :param y:     Sample values.
  :type y:      1D numpy array.
  :param float a: Lower integration boundary.  
  :param float b: Upper integration boundary.  
  """
    a = float(a)
    b = float(b)
    if (a > b):
        a, b = b, a
        sf = -1
    else:
        sf = 1
    #
    if ((a < t[0]) or (b > t[-1])):
        raise ValueError('Integration bounds out of range.')
    #
    spl = interpolate.splrep(t, y, k=3, s=0)
    r = interpolate.splint(a, b, spl)
    return r * sf
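A hedged usage sketch (integrating sin over [0, pi], which should come out close to 2):

import numpy as np
from scipy import interpolate   # assumed module-level import of the excerpt

t = np.linspace(0., np.pi, 200)
y = np.sin(t)
print(spline_integrate_real(t, y, 0., np.pi))   # ~2.0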
Example #35
    def _getWeights(self, a, b):
        """
        Computes weights using spline interpolation instead of Gaussian quadrature

        Args:
            a (float): left interval boundary
            b (float): right interval boundary

        Returns:
            np.ndarray: weights of the collocation formula given by the nodes
        """

        # get the defining tck's for each spline basis function
        circ_one = np.zeros(self.num_nodes)
        circ_one[0] = 1.0
        tcks = []
        for i in range(self.num_nodes):
            tcks.append(
                intpl.splrep(self.nodes,
                             np.roll(circ_one, i),
                             xb=self.tleft,
                             xe=self.tright,
                             k=self.order,
                             s=0.0))

        weights = np.zeros(self.num_nodes)
        for i in range(self.num_nodes):
            weights[i] = intpl.splint(a, b, tcks[i])

        return weights
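A minimal standalone sketch of the same idea outside the class (cardinal spline basis functions built with splrep, then integrated with splint; the node set and interval are illustrative):

import numpy as np
from scipy import interpolate as intpl

nodes = np.linspace(0., 1., 5)
a, b = 0., 1.

weights = np.zeros(nodes.size)
for i in range(nodes.size):
    e_i = np.zeros(nodes.size)
    e_i[i] = 1.0                                  # cardinal data: 1 at node i, 0 elsewhere
    tck = intpl.splrep(nodes, e_i, k=3, s=0.0)
    weights[i] = intpl.splint(a, b, tck)

# for a smooth f, np.dot(weights, f(nodes)) approximates the integral of f over [a, b]
print(np.dot(weights, np.cos(nodes)), np.sin(1.0))   # the two numbers should be close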
Example #36
File: tmp.py Project: YGskty/denso
def integ(x, tck, constant=-1):
    x = np.atleast_1d(x)
    out = np.zeros(x.shape, dtype=x.dtype)
    for n in xrange(len(out)):
        out[n] = interpolate.splint(0, x[n], tck)
    out += constant
    return out
Example #37
def E_W(x,y):
    EW=0
    tck=interpolate.splrep(x,y)
    i=0
    while(i<99):
        EW+=interpolate.splint(x[i],x[i+1],tck)
        i+=1 
    return EW
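Since definite integrals over adjacent intervals add, the loop above is equivalent to a single call over the full range (assuming x holds the 100 samples the loop bound implies):

    EW = interpolate.splint(x[0], x[99], tck)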
Example #38
def E_W(x,y): # equivalent width, cubic-spline method
    EkW=0
    tck=interpolate.splrep(x,y)
    i=0
    while(i<(N_tacaka-1)):
        EkW+=interpolate.splint(x[i],x[i+1],tck)
        i+=1 
    return EkW
Example #39
def rapidity_integral(spec_along_y, ylo=-0.5, yhi=0.5):
    '''1D integration along rapidity/pseudo-rapidity 
    The spline interpolation and integration is much faster than
    the interp1d() and quad combination'''
    #f = interp1d(Y, spec_along_y, kind='cubic')
    #return quad(f, ylo, yhi, epsrel=1.0E-5)[0]
    tck = splrep(Y, spec_along_y)
    return splint(ylo, yhi, tck)
Example #40
def N(mu, E, T, g, n=None):
  """
  Find number of electrons by integrating g(E) * f(E,T,mu) * E**n over all E

  Parameters:
    mu: chemical potential
    E: energy grid
    T: temperature
    g: DOS
    n: moment order (the integrand is additionally weighted by E**n)
  """
  if n is None:
    tck = splrep(E, g*fermi(E,T,mu))
    return splint(E[0], E[-1], tck)
  else:
    tck = splrep(E, g*fermi(E,T,mu)*E**n)
    return splint(E[0], E[-1], tck)
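fermi() is not shown in this excerpt; a hedged stand-in (Fermi-Dirac occupation in units where k_B = 1, an assumption of this sketch) and a toy call:

import numpy as np
from scipy.interpolate import splrep, splint

def fermi(E, T, mu):
    # Fermi-Dirac distribution, with k_B = 1
    return 1.0 / (np.exp((E - mu) / T) + 1.0)

E = np.linspace(-10., 10., 2001)
g = np.ones_like(E)            # toy constant density of states
print(N(0.0, E, 1.0, g))       # ~10 for this flat DOS (half the 20-unit window is occupied)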
Example #41
 def fAnc_ext1(self, s):
      '''Function to minimize when searching for the point from which the tendon
      is not affected by the anchorage slip at extremity 1
     '''
     y = interpolate.splint(
         0.0, s, self.tckLossFric) - s * interpolate.splev(
             s, self.tckLossFric, der=0) - self.slip1 / 2.0
     return y
Example #42
def E_W(x,y):
    EkW=0
    tck=interpolate.splrep(x,y)
    i=0
    while(i<(N_tacaka-1)):
        EkW+=interpolate.splint(x[i],x[i+1],tck)
        i+=1 
    return EkW
Example #43
def integ(x, tck, constant = 10e-9):
    from scipy import interpolate
    x = np.atleast_1d(x)
    out = np.zeros(x.shape, dtype=x.dtype)
    for n in range(len(out)):
        out[n] = interpolate.splint(0, x[n], tck)
    out += constant
    return out
Example #44
def integ(x,tck,constant=-1):
    import numpy as np
    x = np.atleast_1d(x)
    out = np.zeros(x.shape, dtype=x.dtype)
    for n in xrange(len(out)):
        out[n] = interpolate.splint(0,x[n],tck)
    out += constant
    return out
Example #45
    def pixeval(self, x, y, scale=1, csub=23):
        from scipy import interpolate
        from math import pi, cos as COS, sin as SIN

        shape = x.shape
        x = x.ravel()
        y = y.ravel()

        cos = COS(self.pa * pi / 180.)
        sin = SIN(self.pa * pi / 180.)
        xp = (x - self.x) * cos + (y - self.y) * sin
        yp = (y - self.y) * cos - (x - self.x) * sin
        r = (self.q * xp**2 + yp**2 / self.q)**0.5

        k = 2. * self.n - 1. / 3 + 4. / (405. * self.n) + 46 / (25515. *
                                                                self.n**2)
        R = np.logspace(-5., 4., 451)  # 50 pnts / decade
        s0 = np.exp(-k * (R**(1. / self.n) - 1.))

        # Determine corrections for curvature
        rpow = R**(1. / self.n - 1.)
        term1 = (k * rpow / self.n)**2
        term2 = k * (self.n - 1.) * rpow / (R * self.n**2)
        wid = scale / self.re
        corr = (term1 + term2) * wid**3 / 6.
        try:
            minR = R[abs(corr) < 0.005].min()
        except ValueError:
            minR = 0

        # Evaluate model!
        model = interpolate.splrep(R, s0, k=3, s=0)
        R0 = r / self.re
        s = interpolate.splev(R0, model) * scale**2
        if self.n <= 1. or minR == 0:
            return self.amp * s.reshape(shape)
        model2 = interpolate.splrep(R, s0 * R * self.re**2, k=3, s=0)
        coords = np.where(R0 < minR)[0]
        c = (np.indices(
            (csub, csub)).astype(np.float32) - csub / 2) * scale / csub
        for i in coords:
            # The central pixels are tricky because we can't assume that we
            #   are integrating in delta-theta segments of an annulus; these
            #   pixels are treated separately by sub-sampling with ~500 pixels
            if R0[i] < 3 * scale / self.re:  # the pixels within 3*scale are evaluated by sub-sampling
                s[i] = 0.
                y0 = c[1] + y[i]
                x0 = c[0] + x[i]
                xp = (x0 - self.x) * cos + (y0 - self.y) * sin
                yp = (y0 - self.y) * cos - (x0 - self.x) * sin
                r0 = (self.q * xp**2 + yp**2 / self.q)**0.5 / self.re
                s[i] = interpolate.splev(r0.ravel(), model).mean() * scale**2
                continue
            lo = R0[i] - 0.5 * scale / self.re
            hi = R0[i] + 0.5 * scale / self.re
            angle = (scale / self.re) / R0[i]
            s[i] = angle * interpolate.splint(lo, hi, model2)
        return self.amp * s.reshape(shape)
Example #46
 def fAnc_ext2(self, s):
      '''Function to minimize when searching for the point from which the tendon
      is not affected by the anchorage slip at extremity 2
     '''
     y = interpolate.splint(
         s, self.fineScoord[-1],
         self.tckLossFric) - (self.fineScoord[-1] - s) * interpolate.splev(
             s, self.tckLossFric, der=0) - self.slip2 / 2.0
     return y
Example #47
	def lookback_time_n(self,N):
		"""
		Lookback time as a function of log(a)
		in units of billion years
		"""
		H=((1-self.sol()[999,1])*np.exp(-3*self.n1)/(1-self.sol()[:,1]))**(0.5)
		tck3=interpolate.splrep(self.n1,1/H,s=0)
		lt=interpolate.splint(N,0,tck3)
		return self.t_H()*lt
Example #48
def integ(x, spline, constant=-1):
    x = np.atleast_1d(x)
    out = np.zeros(x.shape, dtype=x.dtype)
    
    for n in range(len(out)):
        out[n] = interpolate.splint(0, x[n], spline)
    out += constant
    
    return out
Example #49
 def co_dis_n(self, N):
     H = ((1. - self.sol()[999, 0]**2. *
           (1 + self.sol()[999, 2]) - self.sol()[999, 1]**2.) *
          np.exp(-3 * self.n1) /
          ((1. - self.sol()[:, 0]**2. *
            (1 + self.sol()[:, 2]) - self.sol()[:, 1]**2.)))**(0.5)
     H1 = np.exp(-self.n1) / H
     tck3 = interpolate.splrep(self.n1, H1, s=0)
     rs = interpolate.splint(N, 0, tck3)
     return self.D_H() * rs
Example #50
def summ_rule(curve, norm=1.0, xmin=None, xmax=None):
  """
  Compute the spline integral of curve from xmin to xmax.
  """
  from scipy.interpolate import splrep, splint
  if xmin is None: xmin = curve.x[0]
  if xmax is None: xmax = curve.x[-1]

  tck = splrep(curve.x, curve.y)
  return splint(xmin, xmax, tck)
Example #51
    def lookback_time_n(self, N):
        """
		Lookback time as a function of log(a)
		in units of billion years
		"""
        H = ((1 - self.sol()[999, 1]) * np.exp(-3 * self.n1) /
             (1 - self.sol()[:, 1]))**(0.5)
        tck3 = interpolate.splrep(self.n1, 1 / H, s=0)
        lt = interpolate.splint(N, 0, tck3)
        return self.t_H() * lt
Example #52
	def co_dis_n(self,N):
		"""
		Line of sight comoving distance as a function of log(a)
		as described in the David Hogg paper
		in units of Mpc
		"""
		H=((1-self.sol()[999,1])*np.exp(-3*self.n1)/(1-self.sol()[:,1]))**(0.5)
		H1=np.exp(-self.n1)/H
		tck3=interpolate.splrep(self.n1,H1,s=0)
		rs=interpolate.splint(N,0,tck3)
		return self.D_H()*rs
Example #53
def _spline_integral(x, y, xmin=None, xmax=None):
  """
  Calculate spline integral of curve from xmin to xmax
  """
  from scipy.interpolate import splrep, splint

  if xmin is None: xmin = x[0]
  if xmax is None: xmax = x[-1]

  tck = splrep(x, y)
  return splint(xmin, xmax, tck)
Example #54
def general_random(func,N,interval = (0.,1.)):
    xs = np.linspace(interval[0],interval[1],101)
    spline = splrep(xs,func(xs))
    intfunc = lambda x: splint(interval[0],x,spline)
    #    intfunc = lambda x: quad(func,interval[0],x)[0]

    norm = intfunc(interval[1])
    F = np.random.rand(N)*norm
    x = F*0.
    for i in range(0,N):
        x[i] = brentq(lambda x: intfunc(x) - F[i],interval[0],interval[1])
    return x
Example #55
def ABFilterMagnitude(filter,spectrum,redshift):
    """
    Determines the AB magnitude (up to a constant) given an input filter, SED,
        and redshift.
    """
    from scipy.interpolate import splev,splint,splrep
    from scipy.integrate import simps
    from math import log10
    sol = 299792458.

    wave = spectrum[0].copy()
    data = spectrum[1].copy()

    # Convert to f_nu
    data = data*wave**2/(sol*1e10)

    # Redshift the spectrum and determine the valid range of wavelengths
    wave *= (1.+redshift)
    wmin,wmax = filter[0][0],filter[0][-1]
    cond = (wave>=wmin)&(wave<=wmax)

    # Evaluate the filter at the wavelengths of the spectrum
    response = splev(wave[cond],filter)

    freq = sol*1e10/wave[cond]
    data = data[cond]*(1.+redshift)

    # Flip arrays
    freq = freq[::-1]
    data = data[::-1]
    response = response[::-1]

    # Integrate
    observed = splrep(freq,response*data/freq,s=0,k=1)
    flux = splint(freq[0],freq[-1],observed)

    bp = splrep(freq,response/freq,s=0,k=1)
    bandpass = splint(freq[0],freq[-1],bp)

    return -2.5*log10(flux/bandpass) - 48.6
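A hedged usage sketch with a toy Gaussian-shaped filter curve and a flat f_lambda spectrum; the filter is passed as a splrep tck so that splev(wave, filter) and filter[0] behave as the function expects:

import numpy as np
from scipy.interpolate import splrep

fwave = np.linspace(4000., 6000., 200)                # Angstroms
ftrans = np.exp(-0.5 * ((fwave - 5000.) / 400.)**2)   # toy transmission curve
filt = splrep(fwave, ftrans, s=0, k=1)

swave = np.linspace(1000., 20000., 5000)
sflux = np.ones_like(swave)                           # toy f_lambda spectrum
print(ABFilterMagnitude(filt, [swave, sflux], redshift=0.5))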
Example #56
def N(mu, E,T,g):
  """
  Find number of electrons by integrating g(E) * f(E,T,mu) over all E

  Parameters:
    mu: chemical potential
    E: energy grid
    T: temperature
    g: DOS
  """

  tck = splrep(E, g*fermi(E,T,mu))
  return splint(E[0], E[-1], tck)
Example #57
def integrateCurr4(file_name,t0,t1,iS):
  
  input_file = open(file_name,'r')
  t0 = float(t0)
  t1 = float(t1)
  index = -1
  all_time = []
  all_current = []
  Q = 0.0
  input_lines = input_file.readlines()
  input_file.close()
  Ileak = 0
  for input_line in input_lines:
    index += 1
    tmp = input_line.split()
    t = float(tmp[0])
    I = float(tmp[1])
    if ( index == 0 ):
      Ileak = I
    if ( t>t0 ):
      all_time.append(t)
      all_current.append(I)
  
  
  sall_time = asarray(all_time)
  sall_current = asarray(all_current)
  
  
  
  
  
  splrepint = interpolate.splrep(sall_time, sall_current, s=0)
  currentnew = interpolate.splev(sall_time, splrepint, der=0)
  initial_time = [t0/2.0]
  sinitial_time = asarray(initial_time)
  
  tf = 0
  if (t1 == -1):
    tf = sall_time[len(all_time)-1]
  else:
    tf = t1
  
  charge = interpolate.splint(sall_time[0], tf, splrepint)
  dt = tf-sall_time[0]
  #Ileak = sall_current[len(sall_time)-1]
  currentnew = interpolate.splev(sinitial_time, splrepint, der=0)
  #print "Ileak =", Ileak
  #print "currentnew =", currentnew
  if ( iS == 1 ):
    charge -= Ileak*dt
  return charge
Example #58
    def make_numden_m_spline(self, scat=0, redshift=0.1):
        '''
        Make splines to relate d(num-den)/d(mag) & num-den(> mag) to mag.

        Import scatter [dex].
        '''
        try: 
            if redshift != self.redshift:
                self.initialize_redshift(redshift)
        except AttributeError:
            pass 
        if scat != self.scat:
            self.scat = scat    # convert scatter in log(lum) to scatter in magnitude
        mag_scat = 2.5 * self.scat
        deconvol_iter_num = 30
        dmag = 0.01
        dmag_scat_lo = 2 * mag_scat    # extend fit for b.c.'s of deconvolute
        dmag_scat_hi = 1 * mag_scat
        self.mmin = 17.0
        #self.mmax = 23.3
        self.mmax=24.
        mags = np.arange(self.mmin - dmag_scat_lo, self.mmax + dmag_scat_hi, dmag, np.float32)
        numdens = np.zeros(mags.size)
        dndms = np.zeros(mags.size)
        for mi in xrange(len(mags)):
            numdens[mi] = np.abs(self.numden(mags[mi]))
            dndms[mi] = self.dndm(mags[mi])
        #print 'numden ', numdens[:10]
        #print mags[:10]
        # make no scatter splines
        self.log_numden_m_spl = interpolate.splrep(mags, log10(numdens))
        self.dndm_m_spl = interpolate.splrep(mags, dndms)
        self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], mags[::-1])
        # make scatter splines
        if self.scat:
            # deconvolve observed lf assuming scatter to find unscattered one
            dndms_scat = deconvolute(dndms, mag_scat, dmag, deconvol_iter_num)
            # chop off boundaries, unreliable
            #print mags.min(), mags.max() 
            #mags = mags[dmag_scat_lo / dmag:-dmag_scat_hi / dmag]
            #dndms_scat = dndms_scat[dmag_scat_lo / dmag:-dmag_scat_hi / dmag]
            #print mags.min(), mags.max() 
            # find spline to integrate over
            self.dndm_m_scat_spl = interpolate.splrep(mags, dndms_scat)
            numdens_scat = np.zeros(mags.size)
            for mi in xrange(mags.size):
                numdens_scat[mi] = np.abs(interpolate.splint(mags[mi], mags.max(), self.dndm_m_scat_spl))
                numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
            self.log_numden_m_scat_spl = interpolate.splrep(mags, log10(numdens_scat))
            self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1], mags[::-1])