Code example #1
File: tools.py Project: bnord/LensPop
def VegaFilterMagnitude(filter,spectrum,redshift):
    """
    Determines the Vega magnitude (up to a constant) given an input filter,
        SED, and redshift.
    """
    from scipy.interpolate import splev,splint,splrep
    from scipy.integrate import simps
    from math import log10

    wave = spectrum[0].copy()
    data = spectrum[1].copy()

    # Redshift the spectrum and determine the valid range of wavelengths
    wave *= (1.+redshift)
    data /= (1.+redshift)
    wmin,wmax = filter[0][0],filter[0][-1]
    cond = (wave>=wmin)&(wave<=wmax)

    # Evaluate the filter at the wavelengths of the spectrum
    response = splev(wave[cond],filter)

    # Determine the total observed flux (without the bandpass correction)
    observed = splrep(wave[cond],(response*data[cond]),s=0,k=1)
    flux = splint(wmin,wmax,observed)

    # Determine the magnitude of Vega through the filter
    vwave,vdata = getSED('Vega')
    cond = (vwave>=wmin)&(vwave<=wmax)
    response = splev(vwave[cond],filter)
    vega = splrep(vwave[cond],response*vdata[cond],s=0,k=1)
    vegacorr = splint(wmin,wmax,vega)

    return -2.5*log10(flux/vegacorr)#+2.5*log10(1.+redshift)
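A quick smoke test might look like the following sketch. The bandpass and SED here are synthetic, and the call assumes the module's getSED('Vega') helper is importable, since VegaFilterMagnitude uses it internally:

import numpy as np
from scipy.interpolate import splrep

# Hypothetical Gaussian bandpass as a linear-spline tck (wavelengths in Angstroms)
fwave = np.linspace(4000., 7000., 200)
resp = np.exp(-0.5*((fwave - 5500.)/400.)**2)
filt = splrep(fwave, resp, s=0, k=1)

# Flat rest-frame SED covering the redshifted bandpass
swave = np.linspace(1000., 10000., 2000)
sflux = np.ones_like(swave)

mag = VegaFilterMagnitude(filt, (swave, sflux), 0.5)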
Code example #2
File: test_polyint.py Project: hitej/meta-core
 def setUp(self):
     self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0)
     self.test_xs = np.linspace(-1,6,100)
     self.spline_ys = splev(self.test_xs, self.tck)
     self.spline_yps = splev(self.test_xs, self.tck, der=1)
     self.xi = np.unique(self.tck[0])
     self.yi = [[splev(x, self.tck, der=j) for j in range(3)] for x in self.xi]
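For context: splrep returns the tck triple (knots, B-spline coefficients, degree) that splev consumes, and with s=0 the spline interpolates the data exactly, which is what fixtures like this rely on. A minimal round trip, not part of the original test:

import numpy as np
from scipy.interpolate import splrep, splev

xs = [0, 1, 2, 3, 4, 5]
ys = [0, 10, -1, 3, 7, 2]
tck = splrep(xs, ys, s=0)              # t (knots), c (coefficients), k (degree)
assert np.allclose(splev(xs, tck), ys) # s=0 means exact interpolation at the data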
Code example #3
File: mfn_line.py Project: sarosh-quraishi/simvisage
    def get_values(self, x, k = 1):
        '''
        vectorized interpolation, k is the spline order, default set to 1 (linear)
        '''
        tck = ip.splrep(self.xdata, self.ydata, s = 0, k = k)

        x = np.array([x]).flatten()

        if self.extrapolate == 'diff':
            values = ip.splev(x, tck, der = 0)
        elif self.extrapolate == 'exception':
            if (x >= self.xdata[0]).all() and (x <= self.xdata[-1]).all():
                values = ip.splev(x, tck, der = 0)
            else:
                raise ValueError('value(s) outside interpolation range')

        elif self.extrapolate == 'constant':
            values = ip.splev(x, tck, der = 0)
            values[x < self.xdata[0]] = self.ydata[0]
            values[x > self.xdata[-1]] = self.ydata[-1]
        elif self.extrapolate == 'zero':
            values = ip.splev(x, tck, der = 0)
            values[x < self.xdata[0]] = 0.0
            values[x > self.xdata[-1]] = 0.0
        return values
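The 'constant' branch above is the usual clamp-outside-the-data pattern; a standalone sketch of just that mode:

import numpy as np
from scipy import interpolate as ip

xdata = np.array([0., 1., 2., 3.])
ydata = np.array([0., 1., 4., 9.])
tck = ip.splrep(xdata, ydata, s=0, k=1)

x = np.array([-1., 0.5, 1.5, 5.])
values = ip.splev(x, tck, der=0)
values[x < xdata[0]] = ydata[0]     # clamp to the left boundary value
values[x > xdata[-1]] = ydata[-1]   # clamp to the right boundary value
print(values)                       # [0.  0.5 2.5 9. ]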
Code example #4
File: prestressed_concrete.py Project: lcpt/xc
 def pntsInterpTendon(self,nPntsFine,smoothness,kgrade=3):
     '''Generates a cubic spline (default) or a spline of degree kgrade
     interpolated from the rough points, and evaluates it and its
     derivative at nPntsFine equispaced points.
     Creates the following attributes:

     - fineCoordMtr: matrix with coordinates of the interpolated points
     [[x1,x2, ..],[y1,y2,..],[z1,z2,..]]
     - fineDerivMtr: matrix with the vector representing the derivative
     at each interpolated point
     - tck: tuple (t,c,k) containing the vector of knots, the B-spline
            coefficients, and the degree of the spline.
     - fineScoord: curvilinear coordinate (cumulative length of the curve
                   up to each point)
     - fineProjXYcoord: coordinate along the projection of the curve on the XY
     plane. Matrix 1*nPntsFine whose first element is 0 and the rest the
     cumulative distance to the first point
     '''
     tck, u = interpolate.splprep(self.roughCoordMtr, k=kgrade,s=smoothness)
     x_knots, y_knots,z_knots = interpolate.splev(np.linspace(0, 1, nPntsFine), tck,der=0)
     self.fineCoordMtr=np.array([x_knots, y_knots,z_knots])
     x_der, y_der,z_der = interpolate.splev(np.linspace(0, 1,nPntsFine), tck,der=1)
     self.fineDerivMtr=np.array([x_der, y_der,z_der])
     self.tck=tck
     self.fineScoord=self.getCumLength()
     x0,y0=(self.fineCoordMtr[0][0],self.fineCoordMtr[1][0])
     self.fineProjXYcoord=((self.fineCoordMtr[0]-x0)**2+(self.fineCoordMtr[1]-y0)**2)**0.5
     return
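The splprep/splev pairing above is the standard pattern for parametric 3-D curves; a minimal self-contained version on a toy helix (names here are illustrative only):

import numpy as np
from scipy import interpolate

s = np.linspace(0, 4*np.pi, 50)
pts = [np.cos(s), np.sin(s), s/(4*np.pi)]         # a toy helix
tck, u = interpolate.splprep(pts, k=3, s=0)
fine = np.linspace(0, 1, 200)
x, y, z = interpolate.splev(fine, tck, der=0)     # points on the curve
dx, dy, dz = interpolate.splev(fine, tck, der=1)  # tangent vectors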
Code example #5
def interpolation_polynom(path,grad):
#    data=np.ndarray(shape=(len(path),3),dtype=float)   #create an array of float type for the input points
#    #fill the array with the Pathdata
#    a=path[0]
#    b=path[1]
#    c=path[2]
#    for i in range(len(a)):
#        data[i,0]=a[i]
#        data[i,1]=b[i]
#        data[i,2]=c[i]
#    #arrange the data to use the function
#    data = data.transpose()
    #interpolate with a degree-1 (linear) spline
    if grad==1:
        tck, u = interpolate.splprep(path,k=1,s=10)
        path = interpolate.splev(np.linspace(0,1,200), tck)
    #interpolate with a degree-2 (quadratic) spline
    if grad==2:
        tck, u = interpolate.splprep(path,k=2,s=10)
        path = interpolate.splev(np.linspace(0,1,200), tck)
    #interpolate with a degree-3 (cubic) spline; the long argument list in the
    #original only restated splprep defaults, so it reduces to k=3, s=0.3
    if grad==3:
        tck, u = interpolate.splprep(path, k=3, s=0.3)
        path = interpolate.splev(np.linspace(0,1,200), tck)
    return path
Code example #6
File: lib.py Project: chrinide/image-funcut
def locextr(v, x=None, refine=True, output='full',
            sort_values=True,
            **kwargs):
    "Finds local extrema"
    if x is None: x = np.arange(len(v))
    tck = splrep(x, v, **kwargs)  # spline representation
    if refine:
        xfit = np.linspace(x[0], x[-1], len(x)*10)
    else:
        xfit = x
    yfit = splev(xfit, tck)
    der1 = splev(xfit, tck, der=1)
    #der2 = splev(xfit, tck, der=2)
    dersign = np.sign(der1)

    maxima = np.where(np.diff(dersign) < 0)[0]
    minima = np.where(np.diff(dersign) > 0)[0]
    if sort_values:
        maxima = sorted(maxima, key=lambda p: yfit[p], reverse=True)
        minima = sorted(minima, key=lambda p: yfit[p], reverse=False)
    if output == 'full':
        return xfit, yfit, der1, maxima, minima
    elif output == 'max':
        return zip(xfit[maxima], yfit[maxima])
    elif output == 'min':
        return zip(xfit[minima], yfit[minima])
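Assuming splrep and splev are imported at module level (as the function requires), a hypothetical call on a sampled sine wave:

import numpy as np

x = np.linspace(0, 4*np.pi, 200)
xfit, yfit, der1, maxima, minima = locextr(np.sin(x), x=x, s=0)
print([round(float(xfit[i]), 2) for i in maxima])  # near pi/2 and 5*pi/2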
Code example #7
def testMVCgetDerivWpt(W):
    Ndim = W.shape[0]
    Nwaypoints = W.shape[1]
    dW = np.zeros((W.shape))
    ddW = np.zeros((W.shape))

    traj, tmp = splprep(W, k=5, s=0.01)
    # L = getLengthWpt(W)
    d = 0.0
    for i in range(0, Nwaypoints - 1):
        dW[:, i] = splev(d, traj, der=1)
        ddW[:, i] = splev(d, traj, der=2)
        # dW[:,i] = dW[:,i]/np.linalg.norm(dW[:,i])

        ds = np.linalg.norm(W[:, i + 1] - W[:, i])
        dv = np.linalg.norm(dW[:, i])
        dt = ds / dv
        # ddW[:,i] = ddW[:,i]/np.linalg.norm(ddW[:,i])
        print(d)
        d = d + dt

    dW[:, Nwaypoints - 1] = splev(d, traj, der=1)
    ddW[:, Nwaypoints - 1] = splev(d, traj, der=2)

    return [dW, ddW]
Code example #8
File: test_bsplines.py Project: Brucechen13/scipy
    def test_splev(self):
        xnew, b, b2 = self.xnew, self.b, self.b2

        # check that splev works with 1D array of coefficients
        # for array and scalar `x`
        assert_allclose(splev(xnew, b),
                        b(xnew), atol=1e-15, rtol=1e-15)
        assert_allclose(splev(xnew, b.tck),
                        b(xnew), atol=1e-15, rtol=1e-15)
        assert_allclose([splev(x, b) for x in xnew],
                        b(xnew), atol=1e-15, rtol=1e-15)

        # With n-D coefficients, there's a quirk:
        # splev(x, BSpline) is equivalent to BSpline(x)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning,
                       "Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.")
            assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15)

        # However, splev(x, BSpline.tck) needs some transposes. This is because
        # BSpline interpolates along the first axis, while the legacy FITPACK
        # wrapper does list(map(...)) which effectively interpolates along the
        # last axis. Like so:
        sh = tuple(range(1, b2.c.ndim)) + (0,)   # sh = (1, 2, 0)
        cc = b2.c.transpose(sh)
        tck = (b2.t, cc, b2.k)
        assert_allclose(splev(xnew, tck),
                        b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
Code example #9
File: test_bsplines.py Project: Brucechen13/scipy
    def test_insert(self):
        b, b2, xx = self.b, self.b2, self.xx

        j = b.t.size // 2
        tn = 0.5*(b.t[j] + b.t[j+1])

        bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
        assert_allclose(splev(xx, bn),
                        splev(xx, tck_n), atol=1e-15)
        assert_(isinstance(bn, BSpline))
        assert_(isinstance(tck_n, tuple))   # back-compat: tck in, tck out

        # for n-D array of coefficients, BSpline.c needs to be transposed
        # after that, the results are equivalent.
        sh = tuple(range(b2.c.ndim))
        c_ = b2.c.transpose(sh[1:] + (0,))
        tck_n2 = insert(tn, (b2.t, c_, b2.k))

        bn2 = insert(tn, b2)

        # need a transpose for comparing the results, cf test_splev
        assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
                        bn2(xx), atol=1e-15)
        assert_(isinstance(bn2, BSpline))
        assert_(isinstance(tck_n2, tuple))   # back-compat: tck in, tck out
Code example #10
    def getPosition(self, cur_time=None):
        """Gets the current position for the obstacle.

        Args:
          cur_time: The current time as datetime. Defaults to now; the default
            is resolved at call time rather than once at definition time.
        Returns:
          Returns a tuple (latitude, longitude, altitude_msl) for the obstacle
          at the given time. Returns None if it could not be computed.
        """
        if cur_time is None:
            cur_time = datetime.datetime.now()
        waypoints = self.waypoints.order_by('order')
        num_waypoints = len(waypoints)

        # Waypoint counts of 0 or 1 can skip calc
        if num_waypoints == 0:
            return None
        elif num_waypoints == 1 or self.speed_avg <= 0:
            wpt = waypoints[0]
            return (wpt.position.gps_position.latitude,
                    wpt.position.gps_position.longitude,
                    wpt.position.altitude_msl)

        # Get spline representation
        (total_travel_time, spline_reps) = self.getSplineCurve(waypoints)

        # Sample spline at current time
        cur_time_sec = (cur_time -
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        cur_path_time = np.mod(cur_time_sec, total_travel_time)
        latitude = float(splev(cur_path_time, spline_reps[0]))
        longitude = float(splev(cur_path_time, spline_reps[1]))
        altitude_msl = float(splev(cur_path_time, spline_reps[2]))

        return (latitude, longitude, altitude_msl)
Code example #11
File: reddening.py Project: dhomeier/PopStar
    def romanzuniga07(wavelength, AKs, makePlot=False):
        filters = ['J', 'H', 'Ks', '[3.6]', '[4.5]', '[5.8]', '[8.0]']
        wave =      np.array([1.240, 1.664, 2.164, 3.545, 4.442, 5.675, 7.760])
        A_AKs =     np.array([2.299, 1.550, 1.000, 0.618, 0.525, 0.462, 0.455])
        A_AKs_err = np.array([0.530, 0.080, 0.000, 0.077, 0.063, 0.055, 0.059])
        
        # Interpolate over the curve
        spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)

        A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)
        A_at_wave = AKs * A_AKs_at_wave

        if makePlot:
            py.clf()
            py.errorbar(wave, A_AKs, yerr=A_AKs_err, fmt='bo', 
                        markerfacecolor='none', markeredgecolor='blue',
                        markeredgewidth=2)

            # Make an interpolated curve.
            wavePlot = np.arange(wave.min(), wave.max(), 0.1)
            extPlot = interpolate.splev(wavePlot, spline_interp)
            py.loglog(wavePlot, extPlot, 'k-')

            # Plot a marker for the computed value.
            py.plot(wavelength, A_AKs_at_wave, 'rs',
                    markerfacecolor='none', markeredgecolor='red',
                    markeredgewidth=2)
            py.xlabel('Wavelength (microns)')
            py.ylabel('Extinction (magnitudes)')
            py.title('Roman Zuniga et al. 2007')


        return A_at_wave
Code example #12
File: fit1dcurve.py Project: obscode/snpy
   def __call__(self, x):
      '''Interpolate at point [x].  Returns a 2-tuple (y, mask) where [y]
      is the interpolated point, and [mask] is a boolean array with the same
      shape as [x], True where interpolated and False where extrapolated'''
      if not self.setup:
         self._setup()

      if len(num.shape(x)) < 1:
         scalar = True
      else:
         scalar = False

      x = num.atleast_1d(x)
      if self.realization:
         evm = num.atleast_1d(splev(x, self.realization))
         mask = num.greater_equal(x, self.realization[0][0])*\
                num.less_equal(x,self.realization[0][-1])
      else:
         evm = num.atleast_1d(splev(x, self.tck))
         mask = num.greater_equal(x, self.tck[0][0])*num.less_equal(x,self.tck[0][-1])

      if scalar:
         return evm[0],mask[0]
      else:
         return evm,mask
Code example #13
File: readers.py Project: huangynj/polar2grid
def interpolate_1km_geolocation(lons_40km, lats_40km):
    """Interpolate AVHRR 40km navigation to 1km.

    This code was extracted from the python-geotiepoints package from the PyTroll group. To avoid adding another
    dependency to this package this simple case from the geotiepoints was copied.
    """
    cols40km = numpy.arange(24, 2048, 40)
    cols1km = numpy.arange(2048)
    lines = lons_40km.shape[0]
    # row_indices = rows40km = numpy.arange(lines)
    rows1km = numpy.arange(lines)

    lons_rad = numpy.radians(lons_40km)
    lats_rad = numpy.radians(lats_40km)
    x__ = EARTH_RADIUS * numpy.cos(lats_rad) * numpy.cos(lons_rad)
    y__ = EARTH_RADIUS * numpy.cos(lats_rad) * numpy.sin(lons_rad)
    z__ = EARTH_RADIUS * numpy.sin(lats_rad)
    along_track_order = 1
    cross_track_order = 3

    lines = len(rows1km)
    newx = numpy.empty((len(rows1km), len(cols1km)), x__.dtype)
    newy = numpy.empty((len(rows1km), len(cols1km)), y__.dtype)
    newz = numpy.empty((len(rows1km), len(cols1km)), z__.dtype)
    for cnt in range(lines):
        tck = splrep(cols40km, x__[cnt, :], k=cross_track_order, s=0)
        newx[cnt, :] = splev(cols1km, tck, der=0)
        tck = splrep(cols40km, y__[cnt, :], k=cross_track_order, s=0)
        newy[cnt, :] = splev(cols1km, tck, der=0)
        tck = splrep(cols40km, z__[cnt, :], k=cross_track_order, s=0)
        newz[cnt, :] = splev(cols1km, tck, der=0)

    lons_1km = get_lons_from_cartesian(newx, newy)
    lats_1km = get_lats_from_cartesian(newx, newy, newz)
    return lons_1km, lats_1km
Code example #14
File: GRScalar.py Project: mchandra/GRScalar
    def dX_du(self, y, u, other_variables, solver):

        alpha = self.alpha
        r = y[0]
        sigma = y[1]
        f = y[2]
        g = y[3]
        h = y[4]
        I = y[5]
        k = y[6]

        v = other_variables[0]
        J = other_variables[1]
        F = splev(v, self.F_tck)
        d2A_du2 = splev(u, self.A_tck, 2)
        dA_du = splev(u, self.A_tck, 1)

        dr_du = f

        dsigma_du = I

        df_du = 2*f*I - alpha/r*(J - I**2. + d2A_du2 + dA_du**2.)

        dg_du = -(r/(r**2.- alpha))*(f*g + np.exp(2*sigma)/4.)

        dh_du = (1./(r**2.-alpha))*(f*g + np.exp(2*sigma)/4.) 

        dI_du = J

        df_dv = dg_du

        dg_dv = 2*g*h - F/r - alpha/r*(k - h**2.)

        dk_du = ((-2.*g/(r**2.-alpha)**2.)*(f*g + np.exp(2*sigma)/4.)
                 + (1./(r**2.-alpha))*(df_dv*g + f*dg_dv + np.exp(2*sigma)*h/2.))
Code example #15
File: spksort.py Project: hahong/array_proj
def align_core(spks, subsmp=ALIGN_SUBSMP, maxdt=ALIGN_MAXDT,
        peakloc=ALIGN_PEAKLOC, findbwd=ALIGN_FINDBWD,
        findfwd=ALIGN_FINDFWD, cutat=ALIGN_CUTAT, outdim=ALIGN_OUTDIM,
        peakfunc=ALIGN_PEAKFUNC):
    """Alignment algorithm based on (Quiroga et al., 2004)"""

    if peakfunc == 'argmin':
        peakfunc = np.argmin
    else:
        raise ValueError('Not recognized "peakfunc"')

    R = np.empty((spks.shape[0], outdim), dtype='int16')
    n = spks.shape[1]
    x0 = np.arange(n)

    for i_spk, spk in enumerate(spks):
        tck = ipl.splrep(x0, spk, s=0)

        xn = np.arange(peakloc - findbwd, peakloc + findfwd, 1. / subsmp)
        yn = ipl.splev(xn, tck)

        dt = xn[peakfunc(yn)] - peakloc
        if np.abs(dt) > maxdt:
            dt = 0

        x = x0 + dt
        y = ipl.splev(x, tck)

        R[i_spk] = np.round(y).astype('int16')[cutat: cutat + outdim]
        #dts.append(dt)
    return R
Code example #16
File: colorSpace.py Project: bps10/color
    def _genJuddVos(self):
        '''
        '''
        try:
            from scipy import interpolate as interp
        except ImportError:
            raise ImportError('Sorry cannot import scipy')
            
        #lights = np.array([700, 546.1, 435.8])
        juddVos = np.genfromtxt('data/ciexyzjv.csv', delimiter=',')
        spec = juddVos[:, 0]
        juddVos = juddVos[:, 1:]

        juddVos[:, 0] *= 100. / sum(juddVos[:, 0]) 
        juddVos[:, 1] *= 100. / sum(juddVos[:, 1])
        juddVos[:, 2] *= 100. / sum(juddVos[:, 2])
        r, g, b = self.TrichromaticEquation(juddVos[:, 0], 
                                            juddVos[:, 1],
                                            juddVos[:, 2])
        juddVos[:, 0], juddVos[:, 1], juddVos[:, 2] = r, g, b

        L_spline = interp.splrep(spec, juddVos[:, 0], s=0)
        M_spline = interp.splrep(spec, juddVos[:, 1], s=0)
        S_spline = interp.splrep(spec, juddVos[:, 2], s=0)
        L_interp = interp.splev(self.spectrum, L_spline, der=0)
        M_interp = interp.splev(self.spectrum, M_spline, der=0)
        S_interp = interp.splev(self.spectrum, S_spline, der=0)

        JVinterp = np.array([L_interp, M_interp, S_interp]).T      
        
        return JVinterp
Code example #17
File: GRScalar.py Project: mchandra/GRScalar
    def dX_dv(self, y, v, other_variables, classical_source, solver):

        alpha = self.alpha
        r = y[0]
        sigma = y[1]
        f = y[2]
        g = y[3]
        h = y[4]
        I = y[5]
        J = y[6]

        u = other_variables[0]
        k = other_variables[1]
        F = splev(v, self.F_tck)
        d2A_du2 = splev(u, self.A_tck, 2)
        dA_du = splev(u, self.A_tck, 1)

        df_du = 2*f*I - alpha/r*(J - I**2. + d2A_du2 + dA_du**2.)

        dg_du = -(r/(r**2.- alpha))*(f*g + np.exp(2*sigma)/4.)

        dr_dv = g

        dsigma_dv = h

        df_dv = dg_du

        dg_dv = 2*g*h - F/r - alpha/r*(k - h**2.)

        dh_dv = k

        dI_dv = (1./(r**2.-alpha))*(f*g + np.exp(2*sigma)/4.) # = dH_du

        dJ_dv = ((-2.*f/(r**2.-alpha)**2.)*(f*g + np.exp(2*sigma)/4.)
                 + (1./(r**2.-alpha))*(df_du*g + f*dg_du + np.exp(2*sigma)*I/2.))
Code example #18
File: new2.py Project: rabrahm/zaspe
def get_rats(ZO,ZI,ZF,pars):
	ords = []
	for i in range(sc.shape[1]):
		J1 = np.where(mw > sc[0,i,-1])[0]
		J2 = np.where(mw < sc[0,i,0])[0]
		if len(J1)>0 and len(J2)>0:
			ords.append(i)
	ords = np.array(ords)
	mf = get_full_model(pars[0],pars[1],pars[2],pars[3],RES_POW)
	tmodf = np.zeros((sc.shape[1],sc.shape[2]))
	tscif = np.zeros((sc.shape[1],sc.shape[2]))
	test_plot = np.zeros((4,sc.shape[1],sc.shape[2]))
	for i in ords:
		I = np.where((mw>sc[0,i,0]) & (mw<sc[0,i,-1]))[0]
		modw = mw[I]
		modf = mf[I]
		sciw = sc[0,i]
		scif = sc[3,i]/np.median(sc[3,i])
		modf = pixelization(modw,modf,sciw)
		#IMB = np.where(mask_bin[i]!=0)[0]
		#modf /= modf[IMB].mean()
		mscif = scipy.signal.medfilt(scif,11)
		rat = modf/mscif
		INF = np.where(mscif!=0)[0]
		coef = get_ratio(sciw[INF],rat[INF])
		scif = scif * np.polyval(coef,sciw)
		mscif = mscif * np.polyval(coef,sciw)
		coef = get_cont(sciw,mscif)
		scif = scif / np.polyval(coef,sciw)
		#plot(sciw,scif)
		coef = get_cont(sciw,modf)
		modf = modf / np.polyval(coef,sciw)
		#plot(sciw,modf)	
		tmodf[i] = modf
		tscif[i] = scif
		test_plot[0,i] = sc[0,i]
		test_plot[1,i] = scif
		test_plot[2,i] = modf
		test_plot[3,i] = mask_bin[i]
	#show()
	#print vcdx
	hdu = pyfits.PrimaryHDU(test_plot)
	os.system('rm example.fits')
	hdu.writeto('example.fits')
	rat = tscif/tmodf

	nejx = np.arange(100)/100.
	ratsout = []

	for i in range(len(ZI)):
		ejy = rat[ZO[i],ZI[i]:ZF[i]]
		ejx = np.arange(len(ejy))/float(len(ejy))
		tck = interpolate.splrep(ejx,ejy,k=3)
		if len(ratsout)==0:
			ratsout = interpolate.splev(nejx,tck)
		else:
			ratsout = np.vstack((ratsout,interpolate.splev(nejx,tck)))
			#plot(interpolate.splev(nejx,tck))
	#show()
	return ratsout
Code example #19
def makeSpline(pointList,smPnts):   
    x = [p[0] for p in pointList]
    y = [p[1] for p in pointList] 
        
    xRed = [p[0] for p in smPnts]
    yRed = [p[1] for p in smPnts]

#    print xRed
#    print yRed                                     
    tck,uout = splprep([xRed,yRed],s=0.,k=2,per=False)
    tckOri, uout = splprep([x,y],s=0.,k=2,per=False)
                                
    N=300
                
    uout = list((float(i) / N for i in range(N + 1)))
                            
    xOri, yOri = splev(uout,tckOri)                        
    xSp,ySp = splev(uout,tck)         
                
    import dtw
    diff = dtw.dynamicTimeWarp(zip(xOri,yOri), zip(xSp,ySp))
                
    err =  diff/len(xSp)
        
    return tck,err
Code example #20
File: ripples.py Project: MStolpovskiy/qubic
 def __init__(self, freq,
              nripples=2,
              **keywords):
     nripples_max = 2
     if nripples not in range(nripples_max + 1):
         raise ValueError(
             'Input nripples is not a non-negative integer less than {}'.
             format(nripples_max + 1))
     self.nripples = nripples
     with open(PATH + 'sb_peak_plus_two_ripples_150HGz.pkl', 'rb') as f:
         fl = load(f)
     fl /= fl.max()
     if freq == 150e9:
         fl_ = fl
     else:
         corr1 = [  1.65327594e-02, -2.24216210e-04, 9.70939946e-07, -1.40191824e-09]
         corr2 = [ -3.80559542e-01, 4.76370274e-03, -1.84237511e-05, 2.37962542e-08]
         def f(x, p):
             return p[0] + p[1] * x + p[2] * x**2 + p[3] * x**3
         ell = np.arange(len(fl)) + 1
         spl = splrep(ell * freq / 150e9, fl)
         if freq > 150e9:
             fl_ = splev(ell, spl) * (1 + f(freq / 1e9, corr2) + ell * f(freq / 1e9, corr1))
         else:
             fl_ = np.zeros(len(ell))
             sel = ell < ell.max() * freq / 150e9
             fl_[sel] = splev(ell[sel], spl) * \
                    (1 + f(freq / 1e9, corr2) + ell[sel] * f(freq / 1e9, corr1))
     self.fl = np.sqrt(fl_)
     self.fl[np.isnan(self.fl)] = 0.
Code example #21
 def __interpolateParameters__(self, height, latitude, tkcDeep, tkcUpper):
     # Preallocate result array and start looping through all values
     isScalar = not util.isArray(height)
     results = []
     
     if isScalar:
         results = [0]
         height = [height]
         latitude = [latitude]
     else:
         results = np.zeros(height.shape)
     
     for i in range(0, len(height)):
         # Check where the height is with respect to the interpolation limits
         if height[i] <= tkcDeep[0][-1]:
             results[i] = scp_ip.splev(height[i], tkcDeep)
         elif height[i] >= tkcUpper[0][0]:
             results[i] = scp_ip.bisplev(height[i], latitude[i], tkcUpper)
         else:
             # Interpolate between the lower and upper interpolating functions (do
             # so linearly for now)
             low = scp_ip.splev(tkcDeep[0][-1], tkcDeep)
             high = scp_ip.bisplev(tkcUpper[0][0], latitude[i], tkcUpper)
             
             results[i] = low + (high - low) * (height[i] - tkcDeep[0][-1]) / \
                 (tkcUpper[0][0] - tkcDeep[0][-1])
                 
     if isScalar:
         return results[0]
         
     return results
Code example #22
File: gpsmath.py Project: Ripley6811/ladybug-pie
def ladybug_interp_data( data ):
    '''Interpolates between each 'new' GPS coordinate. Repeated coordinates are
    assumed to be incorrect/inaccurate. This method replaces all repetitions
    with interpolated coordinates in place. This method uses cubic spline
    interpolation.
    '''
    for i in reversed(range(1,len(data['lon']))):
        if data['lon'][i] == data['lon'][i-1]:
            data['lon'][i] = 1000.0
            data['valid'][i] = False
    select = where(data['lon'] < 999)

    # SPLINE VERSION
    data['alt'] = interpolate.splev(data['seqid'],
                                    interpolate.splrep(data['seqid'][select],
                                                       data['alt'][select],
                                                       s=0, k=2  ),
                                    der=0)
    data['lon'] = interpolate.splev(data['seqid'],
                                    interpolate.splrep(data['seqid'][select],
                                                       data['lon'][select],
                                                       s=0, k=2  ),
                                    der=0)
    data['lat'] = interpolate.splev(data['seqid'],
                                    interpolate.splrep(data['seqid'][select],
                                                       data['lat'][select],
                                                       s=0, k=2  ),
                                    der=0)
    return data
Code example #23
File: gl_priors.py Project: sofiasi/darcoda
def check_beta(beta):
    # now checking beta <= 1
    if max(beta)>1.:
        print('max beta!')              # TODO: remove, should be done by my_prior already
        return True
    # TODO: check smoothness of beta

    # now checking physical kappa: g(rvar, rfix, beta, dbetadr) >= 0
    if gp.usekappa == False:
        return False
    r0 = gp.xipol
    dR = r0[1:]-r0[:-1]
    r0extl = np.array([r0[0]/6., r0[0]/5., r0[0]/4., r0[0]/3., r0[0]/2., r0[0]/1.5])
    
    # extrapolation to the right (attention, could overshoot)
    dr0 = (r0[-1]-r0[-2])/8.
    r0extr = np.hstack([r0[-1]+dr0, r0[-1]+2*dr0, r0[-1]+3*dr0, r0[-1]+4*dr0])

    r0nu = np.hstack([r0extl, r0, dR/2.+r0[:-1], r0extr])
    r0nu.sort()
    tck0 = splrep(r0,beta*(r0**2+np.median(r0)**2), k=1, s=0.) # previous: k=2, s=0.1
    betanu = splev(r0nu,tck0)/(r0nu**2+np.median(r0)**2)

    drspl = splev(r0nu,tck0, der=1)
    dbetanudr = (drspl-betanu*2*r0nu)/(r0nu**2+np.median(r0)**2)
    for i in range(len(r0nu)-4):
        for j in range(i+1,len(r0nu)):
            if g(r0nu[j], r0nu[i], betanu[j], dbetanudr[j]) < 0:
                return True

    return False
Code example #24
File: synthetic.py Project: jluastro/JLU-python-code
def nishiyama09(wavelength, AKs, makePlot=False):
    # Data pulled from Nishiyama et al. 2009, Table 1

    filters = ['V', 'J', 'H', 'Ks', '[3.6]', '[4.5]', '[5.8]', '[8.0]']
    wave =      np.array([0.551, 1.25, 1.63, 2.14, 3.545, 4.442, 5.675, 7.760])
    A_AKs =     np.array([16.13, 3.02, 1.73, 1.00, 0.500, 0.390, 0.360, 0.430])
    A_AKs_err = np.array([0.04,  0.04, 0.03, 0.00, 0.010, 0.010, 0.010, 0.010])

    # Interpolate over the curve
    spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)

    A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)
    A_at_wave = AKs * A_AKs_at_wave

    if makePlot:
        py.clf()
        py.errorbar(wave, A_AKs, yerr=A_AKs_err, fmt='bo', 
                    markerfacecolor='none', markeredgecolor='blue',
                    markeredgewidth=2)
        
        # Make an interpolated curve.
        wavePlot = np.arange(wave.min(), wave.max(), 0.1)
        extPlot = interpolate.splev(wavePlot, spline_interp)
        py.loglog(wavePlot, extPlot, 'k-')

        # Plot a marker for the computed value.
        py.plot(wavelength, A_AKs_at_wave, 'rs',
                markerfacecolor='none', markeredgecolor='red',
                markeredgewidth=2)
        py.xlabel('Wavelength (microns)')
        py.ylabel('Extinction (magnitudes)')
        py.title('Nishiyama et al. 2009')

    
    return A_at_wave
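A hypothetical call, assuming numpy and scipy.interpolate are imported at module scope as the function requires. Since splrep is fit with s=0, the spline reproduces the tabulated points exactly:

import numpy as np

A = nishiyama09(np.array([1.25, 2.14, 3.545]), 0.5)
print(A)   # ~[1.51, 0.5, 0.25]: 0.5 * A_lambda/AKs at the J, Ks, [3.6] points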
Code example #25
File: utils.py Project: KlasKronander/ensemble_ioc
def expand_traj_dim_with_derivative(data, dt=0.01):
    augmented_trajs = []
    for traj in data:
        time_len = len(traj)
        t = np.linspace(0, time_len * dt, time_len)
        if time_len > 3:
            if len(traj.shape) == 1:
                """
                mono-dimension trajectory, row as the entire trajectory...
                """
                spl = interpolate.splrep(t, traj)
                traj_der = interpolate.splev(t, spl, der=1)
                tmp_augmented_traj = np.array([traj, traj_der]).T
            else:
                """
                multi-dimensional trajectory, row as the state variable...
                """
                tmp_traj_der = []
                for traj_dof in traj.T:
                    spl_dof = interpolate.splrep(t, traj_dof)
                    traj_dof_der = interpolate.splev(t, spl_dof, der=1)
                    tmp_traj_der.append(traj_dof_der)
                tmp_augmented_traj = np.vstack([traj.T, np.array(tmp_traj_der)]).T

            augmented_trajs.append(tmp_augmented_traj)

    return augmented_trajs
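A hypothetical call on a single mono-dimensional trajectory:

import numpy as np

traj = np.sin(np.linspace(0., 1., 50))
augmented = expand_traj_dim_with_derivative([traj], dt=0.02)
print(augmented[0].shape)   # (50, 2): the signal plus its spline derivative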
Code example #26
File: utils.py Project: navigator8972/nao_writing
def interp_data(data_set):
    """
    interpolate data
    """
    interp_data = dict()
    for key in data_set:
        interp_data[key] = []
        for l in data_set[key]:
            interp_letter = []
            for s in l:
                time_len = s.shape[0]
                if time_len > 3:
                    #interpolate each dim, cubic
                    t = np.linspace(0, 1, time_len)
                    spl_x = interpolate.splrep(t, s[:, 0])
                    spl_y = interpolate.splrep(t, s[:, 1])
                    #resample, 4 times more, vel is also scaled...
                    t_spl = np.linspace(0, 1, 4 * len(t))
                    x_interp = interpolate.splev(t_spl, spl_x, der=0)
                    y_interp = interpolate.splev(t_spl, spl_y, der=0)
                    # #construct new stroke
                    interp_letter.append(np.concatenate([[x_interp], [y_interp]], axis=0).transpose())
                else:
                    #direct copy if no sufficient number of points
                    interp_letter.append(s)
            interp_data[key].append(interp_letter)
    return interp_data
Code example #27
File: utils.py Project: navigator8972/nao_writing
def interp_data_fixed_num(data_set, num=100):
    """
    interpolate data with fixed number of points
    """
    interp_data = dict()
    for key in data_set:
        interp_data[key] = []
        for l in data_set[key]:
            interp_letter = []
            for s in l:
                time_len = s.shape[0]
                if time_len > 3:
                    #interpolate each dim, cubic
                    t = np.linspace(0, 1, time_len)
                    spl_x = interpolate.splrep(t, s[:, 0])
                    spl_y = interpolate.splrep(t, s[:, 1])
                    #resample, 4 times more, vel is also scaled...
                    t_spl = np.linspace(0, 1, num)
                    x_interp = interpolate.splev(t_spl, spl_x, der=0)
                    y_interp = interpolate.splev(t_spl, spl_y, der=0)
                    # #construct new stroke
                    data = np.concatenate([x_interp, y_interp])
                    dt = float(time_len)/num
                    interp_letter.append(np.concatenate([data, [dt]]))
                else:
                    #direct copy if no sufficient number of points
                    interp_letter.append(s)
            interp_data[key].append(interp_letter)
    return interp_data
Code example #28
File: utils.py Project: philrosenfield/padova_tracks
def second_derivative(xdata, inds, gt=False, s=0):
    '''
    The second derivative of d^2 xdata / d inds^2

    why inds for interpolation, not log l?
    if not using something like model number instead of log l,
    the tmin will get hidden by data with t < tmin but different
    log l. This is only a problem for very low Z.
    If I find the arg min of teff to be very close to MS_BEG it
    probably means the MS_BEG is at a lower Teff than Tmin.
    '''
    tckp, _ = splprep([inds, xdata], s=s, k=3)
    arb_arr = np.arange(0, 1, 1e-2)
    xnew, ynew = splev(arb_arr, tckp)
    # second derivative of the parametric curve
    ddxnew, ddynew = splev(arb_arr, tckp, der=2)
    ddyddx = ddynew / ddxnew
    # not just argmin, but must be actual min...
    try:
        if gt:
            aind = [a for a in np.argsort(ddyddx) if ddyddx[a-1] < 0][0]
        else:
            aind = [a for a in np.argsort(ddyddx) if ddyddx[a-1] > 0][0]
    except IndexError:
        return -1
    tmin_ind, _ = closest_match2d(aind, inds, xdata, xnew, ynew)
    return inds[tmin_ind]
Code example #29
File: prep.py Project: luminosa42/astr596
def Interpo(spectra):
    wave_min = 1000
    wave_max = 20000
    pix = 2
    #wavelength = np.linspace(wave_min,wave_max,(wave_max-wave_min)/pix+1)  #creates N equally spaced wavelength values
    wavelength = np.arange(ceil(wave_min), floor(wave_max), dtype=int, step=pix)
    fitted_flux = []
    fitted_error = []
    new = []
    #new = Table()
    #new['col0'] = Column(wavelength,name = 'wavelength')
    new_spectrum=spectra	#declares new spectrum from list
    new_wave=new_spectrum[:,0]	#wavelengths
    new_flux=new_spectrum[:,1]	#fluxes
    new_error=new_spectrum[:,2]   #errors
    lower = new_wave[0] # Find the area where interpolation is valid
    upper = new_wave[len(new_wave)-1]
    lines = np.where((new_wave>lower) & (new_wave<upper))	#creates an array of wavelength values between minimum and maximum wavelengths from new spectrum
    indata=inter.splrep(new_wave[lines],new_flux[lines])	#creates b-spline from new spectrum
    inerror=inter.splrep(new_wave[lines],new_error[lines]) # doing the same with the errors
    fitted_flux=inter.splev(wavelength,indata)	#fits b-spline over wavelength range
    fitted_error=inter.splev(wavelength,inerror)   # doing the same with errors
    badlines = np.where((wavelength<lower) | (wavelength>upper))
    fitted_flux[badlines] = 0  # set the bad values to ZERO !!! 
    new = Table([wavelength,fitted_flux],names=('col1','col2')) # put the interpolated data into the new table    
    #newcol = Column(fitted_flux,name = 'Flux')  
    #new.add_column(newcol,index = None)
    return new
Code example #30
File: geotiepoints.py Project: adybbroe/pygac
    def _interp1d(self):
        """Interpolate in one dimension."""
        lines = len(self.hrow_indices)

        self.newx = np.empty((len(self.hrow_indices),
                              len(self.hcol_indices)),
                             self.x__.dtype)

        self.newy = np.empty((len(self.hrow_indices),
                              len(self.hcol_indices)),
                             self.y__.dtype)

        self.newz = np.empty((len(self.hrow_indices),
                              len(self.hcol_indices)),
                             self.z__.dtype)

        for cnt in range(lines):
            tck = splrep(self.col_indices, self.x__[cnt, :], k=self.ky_, s=0)
            self.newx[cnt, :] = splev(self.hcol_indices, tck, der=0)

            tck = splrep(self.col_indices, self.y__[cnt, :], k=self.ky_, s=0)
            self.newy[cnt, :] = splev(self.hcol_indices, tck, der=0)

            tck = splrep(self.col_indices, self.z__[cnt, :], k=self.ky_, s=0)
            self.newz[cnt, :] = splev(self.hcol_indices, tck, der=0)
Code example #31
    def getLossAnchor(self, Ep, anc_slip_extr1=0.0, anc_slip_extr2=0.0):
        '''Return an array that contains  for each point in fineCoordMtr 
        the cumulative loss of prestressing due to anchorage.
        Loss due to friction must be previously calculated

        :param Ep:      elastic modulus of the prestressing steel

        :param anc_slip_extr1: anchorage slip (data provided by the manufacturer
                        of the anchorage system) at extremity 1 of 
                        the tendon (starting point)  (= deltaL)
        :param anc_slip_extr2: anchorage slip at extremity 2 of 
                        the tendon (ending point) (= deltaL)
        '''
        self.projXYcoordZeroAnchLoss = [0, self.fineProjXYcoord[-1]
                                        ]  # projected coordinates of the
        # points near extremity 1 and extremity 2,
        # respectively, that delimit the lengths of
        # tendon affected by the loss of prestress
        # due to the anchorage slip
        #Initialization values
        lossAnchExtr1 = np.zeros(len(self.fineScoord))
        lossAnchExtr2 = np.zeros(len(self.fineScoord))
        if anc_slip_extr1 > 0:
            self.slip1 = Ep * anc_slip_extr1
            self.tckLossFric = interpolate.splrep(
                self.fineScoord, self.stressAfterLossFrictionOnlyExtr1, k=3)
            if self.fAnc_ext1(self.fineScoord[-1]) < 0:  #the anchorage slip
                #affects all the tendon length
                lackArea = -2 * self.fAnc_ext1(self.fineScoord[-1])
                excess_delta_sigma = lackArea / self.getCumLength().item(-1)
                sCoordZeroLoss = self.fineScoord[-1]
            else:
                # we use newton_krylov solver to find the zero of function
                # fAnc_ext1 that gives us the point from which the tendon is
                # not affected by the anchorage slip. Tolerance and relative
                # step is given as parameters in order to enhance convergence
                tol = self.slip1 * 1e-6
                sCoordZeroLoss = optimize.newton_krylov(
                    F=self.fAnc_ext1,
                    xin=self.fineScoord[-1] / 2.0,
                    rdiff=0.1,
                    f_tol=tol)  #point from which the tendon is not
                #affected by the anchorage slip
                self.projXYcoordZeroAnchLoss[0] = self.ScoordToXYprojCoord(
                    sCoordZeroLoss.item(0))
                excess_delta_sigma = 0
            stressSCoordZeroLoss = interpolate.splev(
                sCoordZeroLoss, self.tckLossFric,
                der=0)  #stress in that point (after loss due to friction)
            condlist = [self.fineScoord <= sCoordZeroLoss]
            choicelist = [
                2 *
                (self.stressAfterLossFrictionOnlyExtr1 - stressSCoordZeroLoss)
                + excess_delta_sigma
            ]
            lossAnchExtr1 = np.select(condlist, choicelist)
        if anc_slip_extr2 > 0:
            self.slip2 = Ep * anc_slip_extr2
            self.tckLossFric = interpolate.splrep(
                self.fineScoord, self.stressAfterLossFrictionOnlyExtr2, k=3)
            if self.fAnc_ext2(self.fineScoord[0]) < 0:  #the anchorage slip
                #affects all the tendon length
                lackArea = -2 * self.fAnc_ext2(self.fineScoord[0])
                excess_delta_sigma = lackArea / self.getCumLength().item(-1)
                sCoordZeroLoss = self.fineScoord[0]
            else:
                # we use newton_krylov solver to find the zero of function
                # fAnc_ext2 that gives us the point from which the tendon is
                # not affected by the anchorage slip
                tol = self.slip2 * 1e-6
                sCoordZeroLoss = optimize.newton_krylov(
                    self.fAnc_ext2,
                    self.fineScoord[-1] / 2.0,
                    rdiff=0.1,
                    f_tol=tol)  #point from which the tendon is affected
                #by the anchorage slip
                self.projXYcoordZeroAnchLoss[1] = self.ScoordToXYprojCoord(
                    sCoordZeroLoss.item(0))
                excess_delta_sigma = 0
            stressSCoordZeroLoss = interpolate.splev(
                sCoordZeroLoss, self.tckLossFric,
                der=0)  #stress in that point (after loss due to friction)
            condlist = [self.fineScoord >= sCoordZeroLoss]
            choicelist = [
                2 * (self.stressAfterLossFriction - stressSCoordZeroLoss) +
                excess_delta_sigma
            ]
            lossAnchExtr2 = np.select(condlist, choicelist)
        lossAnch = lossAnchExtr1 + lossAnchExtr2
        return lossAnch
Code example #32
File: power_curve.py Project: sevberg/RESKit
    def convolute_by_gaussian(self,
                              scaling=0.06,
                              base=0.1,
                              extend_beyond_cut_out=True,
                              _min_speed=0.01,
                              _max_speed=40,
                              _steps=4000):
        """
        Convolutes a turbine power curve by a normal distribution function with wind-speed-dependent standard deviation.

        Parameters
        ----------
        scaling : float, optional
            scaling factor, by default 0.06

        base : float, optional
            base value, by default 0.1

        extend_beyond_cut_out : bool, optional
            extend the estimation beyond the turbine's cut out wind speed, by default True

        _min_speed : float, optional
            minimum wind speed value in m/s to be considered, by default 0.01

        _max_speed : int, optional
            maximum wind speed value in m/s to be considered, by default 40

        _steps : int, optional
            number of steps in between the wind speed range, by default 4000

        Returns
        -------
        PowerCurve
            The resulting convoluted power curve

        Notes
        ------
        The wind-speed-dependent standard deviation is computed with: std = wind_speed * scaling + base

        """
        # Initialize windspeed axis
        ws = np.linspace(_min_speed, _max_speed, _steps)
        dws = ws[1] - ws[0]

        # check if we have enough resolution
        tmp = (scaling * 5 + base) / dws
        if tmp < 1.0:  # manually checked threshold
            if tmp < 0.25:  # manually checked threshold
                raise ResError("Insufficient number of 'steps'")
            else:
                print(
                    "WARNING: 'steps' may not be high enough to properly compute the convoluted power curve. Check results or use a higher number of steps"
                )

        # Initialize vanilla power curve
        selfInterp = splrep(
            ws, np.interp(ws, self.wind_speed, self.capacity_factor))

        cf = np.zeros(_steps)
        sel = ws < self.wind_speed.max()
        cf[sel] = splev(ws[sel], selfInterp)

        cf[ws < self.wind_speed.min(
        )] = 0  # set all windspeed less than cut-in speed to 0
        cf[ws > self.wind_speed.max(
        )] = 0  # set all windspeed greater than cut-out speed to 0 (just in case)
        cf[cf < 0] = 0  # force a floor of 0
        # cf[cf>self[:,1].max()] = self[:,1].max() # force a ceiling of the max capacity

        # Begin convolution
        convolutedCF = np.zeros(_steps)
        for i, ws_ in enumerate(ws):
            convolutedCF[i] = (norm.pdf(
                ws, loc=ws_, scale=scaling * ws_ + base) * cf).sum() * dws

        # Correct cutoff, maybe
        if not extend_beyond_cut_out:
            convolutedCF[ws > self.wind_speed[-1]] = 0

        # Done!
        ws = ws[::40]
        convolutedCF = convolutedCF[::40]
        return PowerCurve(ws, convolutedCF)
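A standalone sketch of the convolution core: a toy power curve is smoothed by a normal kernel whose standard deviation grows with wind speed (std = scaling * ws + base, with the default scaling and base above). The curve values are illustrative only:

import numpy as np
from scipy.stats import norm

ws = np.linspace(0.01, 40, 4000)
dws = ws[1] - ws[0]
cf = np.clip((ws - 3.0) / 9.0, 0.0, 1.0)   # toy curve: cut-in 3 m/s, rated 12 m/s
cf[ws > 25.0] = 0.0                        # cut-out at 25 m/s

convoluted = np.empty_like(cf)
for i, w in enumerate(ws):
    # kernel std grows with wind speed: std = scaling * ws + base
    convoluted[i] = (norm.pdf(ws, loc=w, scale=0.06 * w + 0.1) * cf).sum() * dws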
Code example #33
def volume_renorm(phase, xint, Pint, bmix, R, T, r_data):

    Pspln = r_data[5]
    rho = r_data[3][0]
    x = np.array(menus.flatten(r_data[1]))

    nd = len(rho)
    nx = x.shape[0]

    Pvec = np.empty((nd))
    Pfvec = np.empty((nd))
    dPdrho = np.empty((nd))

    flag = False
    inflex = False

    while not flag:
        #Interpolate specific pressure
        Pvint = Pspln(xint,0.0001)
        Pvec[0] = Pvint
        Pfvec[0] = Pvec[0] - Pint

        for i in range(1,nd-1):
            rhoint = float(i)/nd
            Pvint = Pspln(xint,rhoint)
            #print i,rhoint,Pvint
            Pvec[i] = Pvint
            Pfvec[i] = Pvec[i] - Pint
            dPdrho[i] = (Pvec[i+1] - Pvec[i-1])/(float(i+1)/nd-float(i-1)/nd)
            if inflex==False and dPdrho[i]<0:
                inflex=True
        Pvint = Pspln(xint,int(nd-1))
        Pvec[nd-1] = Pvint
        Pfvec[nd-1] = Pvec[nd-1] - Pint
        dPdrho[0] = (Pvec[1] - Pvec[0])/(float(1)/nd-float(0)/nd)
        dPdrho[nd-1] = (Pvec[nd-1] - Pvec[nd-2])/(float(nd-1)/nd-float(nd-2)/nd)

        #Bracketing the real densities at given P
        #print rho
        #print Pvec
        #print Pfvec
        
        #NEW
        P_fvec_spl = splrep(rho,Pfvec,k=3)         #Cubic Spline Representation
        Pfvec = splev(rho,P_fvec_spl,der=0)
        #NEW
        
        #plt.plot(rho,Pvec)
        #plt.ylim(-15,15)
        #plt.show()
        max1 = 2
        min1 = int(0.90*nd) #it was 0.90 before
        max2 = max1+2
        min2 = min1-2
        #raw_input('before')
        while Pfvec[max1]*Pfvec[max2]>0:# and max2<len(Pfvec):
            #max2 = max2+int(nd/200)
            max2 = max2+1
            #print 'max',max2
            #raw_input('max')
        if max2-int(nd/100)<0:
            max1 = 0
            #print 'max1',max1
        else:
            #max1 = max2-int(nd/100)
            max1 = max2-4
            #print 'else',max1

        while Pfvec[min1]*Pfvec[min2]>0 and min2>0:
            #min2 = min2-int(nd/200)
            min2 = min2-1
            #print 'min',min2
            #raw_input('min')
        #min1 = min2+int(nd/100)
        min1 = min2+4

        #print 'int',Pint,xint,phase
        #print 'falsi_spline',rho[max1],rho[max2],rho[min1],rho[min2]
        #print 'falsi_pressures',Pfvec[max1],Pfvec[max2],Pfvec[min1],Pfvec[min2]
        #Calculate coexistence densities in interpolated isotherm for given P
        rho_vap = numerical.falsi_spline(rho, Pfvec, rho[max1], rho[max2], 1e-5)
        #print 'rho_vap',rho_vap
        rho_liq = numerical.falsi_spline(rho, Pfvec, rho[min2], rho[min1], 1e-5)
        #print 'rho_liq',rho_liq
        #raw_input('...')

        if inflex==True and abs(rho_vap-rho_liq)<1e-5:
            Pint=Pint/2

        if inflex==True and abs(rho_vap-rho_liq)>1e-5:
            flag=True

        if xint>0.05:
            flag=True

    #Select desired density
    if phase<0:
        rho_out = rho_liq
    if phase>0:
        rho_out = rho_vap

    V = 1/(rho_out/bmix)
    V_out = []
    V_out.append(V)
    V_out.append(0)
    return V_out
Code example #34
def renorm(EoS,IDs,MR,T,nd,nx,kij,nc,CR,en_auto,beta_auto,SM,n,estimate,L_est,phi_est):
    #nd    Size of density grid
    #nx    Size of mole fraction grid
    #n     Main loop iteration controller
    
    #If only 1 component is present mimic a binary mixture made of the same component
    if nc==1:
        IDs[1] = IDs[0]
    
    #Recover parameters
    L_rg = data.L(IDs)         #Vector with L parameters (cutoff length)
    phi_rg = data.phi(IDs)     #Vector with phi parameters
    Tc = data.Tc(IDs)
    
    #Components parameters
    a = eos.a_calc(IDs,EoS,T)
    b = eos.b_calc(IDs,EoS)
    Tr = T/np.array(Tc)
    
    #Main loop parameters
    x = np.array([0.0001,0.9999])
    stepx = (1/float(nx)) #Step to calculate change
    k = 0               #Vector fill counter
    i = 1               #Main loop counter
    r = 0               #Report counter
    count = 0
    rho = np.empty((nd))            #Density vector
    rhov = []                       #Density vector to export
    x0v = []                        #Mole fraction vector to export
    bmixv = []
    f = np.empty((nd))              #Helmholtz energy density vector
    fv = []                         #Helmholtz energy density vector to export
    fresv = []                      #Residual Helmholtz energy density vector to export
    Tv = []                         #Temperature values to export
    df = np.empty((nd))             #Changes in helmholtz energy density vector
    f_orig = np.empty((nd))         #Unmodified Helmholtz energy density vector
    rhob = []                       #Adimensional density vector
    u = np.empty((nd))
    X = np.ones((4*nc))
    Pv = []
    fmat = []
    Pmatv = np.empty((nx,nd))
    fmatres = []
    umat = []
    ures = np.empty((nd))
    uv = []
    df_vdw = np.empty((nd))
    
    df_vec2 = []
    f_vec2 = []
    P_vec2 = []
    u_vec2 = []
    aa2 = []
    
    if nc==1:
        X = np.ones((8))
    
    #Main loop*************************************
    while x[0]<1.0:
        if nc>1:
            print(x[0])
        if x[0]==0.006: #after first step
            x[0]=0.005
        x[1]=1-x[0]
        
        if nc==1:
            x[0] = 0.999999
            x[1] = 0.000001
        
        #Mixture parameters
        bmix = eos.bmix_calc(MR,b,x)
        amix = eos.amix_calc(MR,a,x,kij)
        Nav = 6.023e23
        rhomax = 0.999999
        
        #Mixture Renormalization parameters
        L = np.dot(x,np.power(L_rg,3.0))
        L = np.power(L,1.0/3.0)
        phi = np.dot(x,phi_rg)
        
        #print L
        #print phi
        
        
        pi = math.pi
        omega = data.omega(IDs)[0]
        sig = np.power(6/pi*b/Nav*np.exp(omega),1.0/3.0)[0]
        #sig = np.power(b/Nav,1.0/3.0)[0]
        #c1 = data.c1(IDs)[0]
        #en = data.en(IDs)[0]
        #sig = np.power(1.15798*b/Nav,1.0/3.0)[0]
        L = sig
        #L = 1.5*sig
        #L = 1/c1*sig
        #print L,phi
        #L = 0.5/c1*sig
        #PHI = 4*(pi**2.0)
        
        #PHI = 1.0/pi/4.0
        #lamda = 1.5
        #w_LJ = (9.0*sig/7.0) #lennard-jones
        #print 'LJ=',w_LJ
        #w_SW = np.sqrt((1./5.)*(sig**2.0)*(lamda**5.0-1)/(lamda**3.0-1)) #square-well potential
        #print 'SW=',w_SW
        #phi = PHI*(w_LJ**2)/2/(L**2)
        #phi = PHI*(w_SW**2)/2/(L**2)
        
        #om = data.omega(IDs)
        #phi = 2/np.power(np.exp(om),4)[0]
        #w = 0.575*sig*en/T/kB/b[0]*1e6
        #print 'w=',w
        #phi = 2/np.power(np.exp(c1),4)[0]
        
        #w = 100.0*1e-9/100 #van der waals wavelength 100nm
        #phi = PHI*(w**2)/2/(L**2)
        
        #print L
        #print phi
        #print '---------'
        
        #New parameters
        #L = 1.5*np.power(b/Nav,1.0/3.0)
        #h = 6.626e-34
        #kkB = 1.38e-23
        #MM = 0.034
        #deBroglie = h/np.sqrt(3*kkB*T*MM/Nav)
        #phi = (deBroglie**2.0)/(L**2.0)*150*3.14
        #L = L[0]
        #phi = phi[0]
        #print 'L=',L
        #print 'phi=',phi
        

        if estimate==True:
            L = L_est
            phi = phi_est

        for k in range(0,nd):
            rho[k] = np.array(float(k)/nd/bmix)
            if k==0:
                rho[0] = 1e-6
            if EoS==6:
                if k==0:
                    X = association.frac_nbs(nc,1/rho[k],CR,en_auto,beta_auto,b,bmix,X,0,x,0,T,SM)
                else:
                    X = association.frac_nbs(nc,1/rho[k],CR,en_auto,beta_auto,b,bmix,X,1,x,0,T,SM)
            #print X,k
            #raw_input('...')
            f[k] = np.array(helm_rep(EoS,R,T,rho[k],amix,bmix,X,x,nc))   #Helmholtz energy density
            k = k+1
            
        f_orig = f                                #Initial helmholtz energy density
        
        """
        #-------------------------------------------
        #Fluctuation Analysis-----------------------
        #-------------------------------------------
        drho = rho[int(nd/2)]-rho[int(nd/2)-1]
        for i in range(1,nd-2):
            u[i] = (f[i+1]-f[i-1])/(2*drho)
        u[nd-1] = (f[nd-1]-f[nd-2])/drho
        u[0] = (f[1]-f[0])/drho
        
        fspl = splrep(rho,f,k=3)         #Cubic Spline Representation
        f3 = splev(rho,fspl,der=0)
        u = splev(rho,fspl,der=1)        #Evaluate Cubic Spline First derivative

        P = -f3+rho*u
        
        P_vec2.append(P)
        u_vec2.append(u)
        #===========================================
        #===========================================
        """
    
        #Subtract attractive forces (due long range correlations)
        f = f + 0.5*amix*(rho**2)
        
        df_vec2.append(rho)
        f_vec2.append(rho)

        #Adimensionalization
        rho = rho*bmix
        f = f*bmix*bmix/amix
        T = T*bmix*R/amix
        
        f_vec2.append(f)

        rho1 = rho.flatten()

        #Main loop****************************************************************
        i = 1
        while i<=n:
            #print i
            #K = kB*T/((2**(3*i))*(L**3))
            #K = R*T/((L**3)*(2**(3*i)))
            K = T/(2**(3*i))/((L**3)/bmix*6.023e23)
            
            
            #Long and Short Range forces
            fl = helm_long(EoS,rho,f)
            fs = helm_short(EoS,rho,f,phi,i)

            #Calculate df
            width = rhomax/nd
            w = 0
            for w in range(0,nd):
                df[w] = renorm_df(w,nd,fl,fs,K,rho,width)
            
            #Update Helmholtz Energy Density
            df = np.array(df) #used to evaluate each step
            f = f + df
            df_vec2.append(list(df/bmix/bmix*amix*1e6/rho))
            f_vec2.append(list(f))
            #print 'i=',i,K,f[2],df[2],T,df_vec2[1][2]
            i = i+1
            #print i

        #Dimensionalization
        rho = rho/bmix
        f = f/bmix/bmix*amix
        T = T/bmix/R*amix
        
        #df_total = 
        #df = np.array(df)
        #df_vec.append(df)
        
        #Add original attractive forces
        f = f - 0.5*amix*(rho**2)
        
        #Store residual value of f
        #fres = f - rho*R*T*(np.log(rho)-1) #WRONG
        fres = f - rho*R*T*np.log(rho)
        #f = f + rho*R*T*(np.log(rho)-1) #Already accounting ideal gas energy
        
        #strT = str(T)
        #dfT = ('df_%s.csv' %strT)
        TT = np.zeros((nd))
        df_vdw = 0.5*((rho*bmix)**2)
        df_vec2.append(list(df_vdw))
        f_vec2.append(list(df_vdw))
        for i in range(0,nd):
            TT[i] = T
        df_vec2.append(TT)
        f_vec2.append(TT)
        envelope.report_df(df_vec2,'df.csv')
        envelope.report_df(f_vec2,'f.csv')
        #raw_input('----')

        #if(EoS==6):
        #    f = fres
        
        fv.append(f)
        fresv.append(fres)
        x0v.append(x[0])
        bmixv.append(bmix)
        
        if r==0:
            rhob.append(rho*bmix) #rhob vector is always the same
            rhov.append(rho) #in case the calculation is done for one-component
        r=1

        drho = rho[int(nd/2)]-rho[int(nd/2)-1]
        for i in range(1,nd-2):
            u[i] = (f[i+1]-f[i-1])/(2*drho)
        u[nd-1] = (f[nd-1]-f[nd-2])/drho
        u[0] = (f[1]-f[0])/drho
        
        fspl = splrep(rho,f,k=3)         #Cubic Spline Representation
        f = splev(rho,fspl,der=0)
        u = splev(rho,fspl,der=1)        #Evaluate Cubic Spline First derivative

        P = -f+rho*u
        Pv.append(P)
        for j in range(0,nd):
            Pmatv[count][j] = P[j]
            #print Pmatv[count][j],count,j,x[0]
        count = count+1
        
        """
        #Fluctuation Analysis-----------------------
        P_vec2.append(P)
        u_vec2.append(u)
        P_vec2.append(TT)
        u_vec2.append(TT)
        envelope.report_df(P_vec2,'P.csv')
        envelope.report_df(u_vec2,'u.csv')
        #===========================================
        """

        fmat.append(f)
        fmatres.append(fres)

        x[0] = x[0]+stepx
        #if nc>1:
        #    if abs(x[0]-1.0)<1e-5:
        #        x[0] = 0.9999
        
    if nc>1:
        Pmat = RectBivariateSpline(x0v,rhob,Pmatv)
    else:
        Pmat = 'NULL'
        
        #differente imposed by renormalization
        dfv = []
        dff = f-f_orig
        dfv.append(dff)
        avdw = []
        aa2 = 0.5*amix*(rho**2)
        avdw.append(aa2)

    renorm_out = []
    renorm_out.append(fv)
    renorm_out.append(x0v)
    renorm_out.append(rhov)
    renorm_out.append(rhob)
    renorm_out.append(fmat)
    renorm_out.append(Pmat)
    if nc>1: #If binary mixture, report calculated values
        print('before report')
        ren_u = report_renorm_bin(rhob,x0v,fmatres,nx,nd,MR,IDs,EoS)
        renorm_out.append(ren_u)
    else:
        renorm_out.append(0)
    renorm_out.append(fresv)
    renorm_out.append(Pv)
    renorm_out.append(bmixv)
    renorm_out.append(dfv)
    renorm_out.append(avdw)
    return renorm_out
Code example #35
File: FE1 v2.py Project: paulsod/financial-economics
def df(x):
    return sci.splev(x, tck, der=1)
Code example #36
File: FE1 v2.py Project: paulsod/financial-economics
def f(x):
    return sci.splev(x, tck, der=0)
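
The two one-liners above (#35 and #36) assume a module alias sci for scipy.interpolate and a knot/coefficient tuple tck built elsewhere in FE1 v2.py. A minimal, self-contained sketch of that setup (the sample data here is invented):

import numpy as np
import scipy.interpolate as sci

x_data = np.linspace(0.0, 10.0, 20)      # made-up sample curve
y_data = np.sin(x_data)
tck = sci.splrep(x_data, y_data, k=3)    # cubic spline representation

print(sci.splev(5.0, tck, der=0))        # f(5.0): spline value, ~sin(5)
print(sci.splev(5.0, tck, der=1))        # df(5.0): first derivative, ~cos(5)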
Code example #37
    169, 173, 176, 178, 180, 184, 186, 188, 190, 192, 194, 196, 198, 200, 201,
    202, 204, 250
]

x = np.array(x_not_np_array)
y = np.array(y_not_np_array)

x2 = []
y2 = []

x3 = []
y3 = []

tck, u = interpolate.splprep([x, y], s=smoothing)
unew = np.arange(0, 1.01, 0.001)
out = interpolate.splev(unew, tck)


def neki(X1, Y1, X2, Y2):
    dx = (Y2 - Y1) / sqrt((X2 - X1)**2 + (Y2 - Y1)**2) * z
    dy = (X2 - X1) / sqrt((X2 - X1)**2 + (Y2 - Y1)**2) * z

    x2.append((X2 + X1) / 2 + dx)
    x3.append((X2 + X1) / 2 - dx)

    y2.append((Y2 + Y1) / 2 - dy)
    y3.append((Y2 + Y1) / 2 + dy)

    return
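
For context: neki offsets the midpoint of each segment (X1,Y1)-(X2,Y2) by a distance z along the segment normal, so driving it over consecutive spline points accumulates two parallel curves in (x2, y2) and (x3, y3). A hedged driver loop; z (the offset half-width) and sqrt come from elsewhere in the original script, so both are assumptions here:

from math import sqrt

z = 5.0  # assumed offset half-width; set in the original script
for i in range(len(out[0]) - 1):
    neki(out[0][i], out[1][i], out[0][i + 1], out[1][i + 1])
# (x2, y2) and (x3, y3) now trace curves offset by z on either side of the spline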

Code example #38
from numpy import linspace, zeros
from scipy import interpolate
import Struct, copy, Fileio

if __name__ == '__main__':

    emin = -5.0
    emax = 5.0
    rom = 100
    execfile('INPUT.py')  # Python 2 builtin; INPUT.py provides the parameter dict p used below
    TB = Struct.TBstructure('POSCAR', p['atomnames'], p['orbs'])
    cor_at = p['cor_at']
    cor_orb = p['cor_orb']
    TB.Compute_cor_idx(cor_at, cor_orb)
    ommesh = linspace(emin, emax, rom)
    Sig_tot = zeros((TB.ncor_orb, rom), dtype=complex)
    for i, ats in enumerate(cor_at):
        (om, Sig, TrSigmaG, Epot, nf_q,
         mom) = Fileio.Read_complex_Data('Sig' + str(i + 1) + '.out')
        newSig = zeros((len(Sig), rom), dtype=complex)
        for ii in range(len(Sig)):
            SigSpline = interpolate.splrep(om, Sig[ii].real, k=1, s=0)
            newSig[ii, :] += interpolate.splev(ommesh, SigSpline)
            SigSpline = interpolate.splrep(om, Sig[ii].imag, k=1, s=0)
            newSig[ii, :] += 1j * interpolate.splev(ommesh, SigSpline)
        for at in ats:
            for ii, orbs in enumerate(cor_orb[i]):
                for orb in orbs:
                    idx = TB.idx[at][orb]
                    Sig_tot[idx, :] = copy.deepcopy(newSig[ii, :])
    Fileio.Print_complex_multilines(Sig_tot, ommesh, 'SigMoo_real.out')
Code example #39
    #%% Repeat with splines ###
    print('running ccf using splines')
    j_splines = []
    k_splines = []
    test_x = np.linspace(0,len(full_months)-4,100)
    knots = testx_months[1:-1]
    new_test_j_flux = np.zeros(np.shape(test_j_flux))
    new_test_k_flux = np.zeros(np.shape(test_k_flux))
    for m in range(len(bindata)):
        j_spl = splrep(Jx_months, test_j_flux[m,:], w=1/test_j_fluxerr[m,:], t=knots)
        j_splines.append(j_spl)
        k_spl = splrep(Kx_months, test_k_flux[m,:], w=1/test_k_fluxerr[m,:], t=knots)
        k_splines.append(k_spl)
        
        ###  use splines to make test arrays ###
        new_test_j_flux[m,:] = splev(Jx_months, j_spl)
        new_test_k_flux[m,:] = splev(Kx_months, k_spl)
    
    spline_test_j_flux = new_test_j_flux
    spline_test_k_flux = new_test_k_flux
    
    #%% Create correlation arrays ###
    ''' Need arrays that have a space for every possible month so that the values 
    can be separated by the correct time periods'''

    ### Assign new arrays ###
    spline_corr_test_j_flux = vari_funcs.cross_correlation.make_corr_arrays(spline_test_j_flux, 
                                                                     jmask, 
                                                                     full_months)
    spline_corr_test_k_flux = vari_funcs.cross_correlation.make_corr_arrays(spline_test_k_flux, 
                                                                     kmask,
Code example #40
def warp(u, index):
    x = np.arange(len(u))
    tck = interpolate.splrep(x, u)
    x1 = np.arange(index)
    return interpolate.splev(x1, tck)
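
warp fits a default cubic spline through u sampled at 0..len(u)-1 and re-evaluates it on the integer grid 0..index-1, so it can shorten or (by spline extrapolation) lengthen a signal. A quick check, assuming numpy and scipy.interpolate are imported as in the surrounding file:

import numpy as np
from scipy import interpolate

u = np.array([0.0, 1.0, 4.0, 9.0, 16.0])  # samples of x**2
print(warp(u, 3))   # first three points reproduced: ~[0, 1, 4]
print(warp(u, 8))   # indices 5..7 are spline extrapolation beyond the data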
Code example #41
    def fs_model_curve(self, frequency):
        '''interpolates fs model data'''
        tck = interpolate.splrep(np.arange(len(FluctuationPattern.fs_model)),
                                 FluctuationPattern.fs_model,
                                 s=0)
        return interpolate.splev(frequency, tck, der=0)
Code example #42
File: fit.py Project: lampo808/PyCorrFit
    def compute_weights(correlation, verbose=0, uselatex=False):
        """ computes and returns weights of the same length as 
        `correlation.correlation_fit`
        
        `correlation` is an instance of Correlation
        """
        corr = correlation
        model = corr.fit_model
        model_parms = corr.fit_parameters
        ival = corr.fit_ival
        weight_data = corr.fit_weight_data
        weight_type = corr.fit_weight_type
        #parameters = corr.fit_parameters
        #parameters_range = corr.fit_parameters_range
        #parameters_variable = corr.fit_parameters_variable

        cdat = corr.correlation
        if cdat is None:
            raise ValueError("Cannot compute weights; No correlation given!")
        cdatfit = corr.correlation_fit
        x_full = cdat[:, 0]
        y_full = cdat[:, 1]
        x_fit = cdatfit[:, 0]
        #y_fit = cdatfit[:,1]

        dataweights = np.ones_like(x_fit)

        try:
            weight_spread = int(weight_data)
        except (TypeError, ValueError):
            if verbose > 1:
                warnings.warn(
                    "Could not get weight spread for spline. Setting it to 3.")
            weight_spread = 3

        if weight_type[:6] == "spline":
            # Number of knots to use for spline
            try:
                knotnumber = int(weight_type[6:])
            except ValueError:
                if verbose > 1:
                    print("Could not get knot number. Setting it to 5.")
                knotnumber = 5

            # Compute borders for spline fit.
            if ival[0] < weight_spread:
                # non-optimal case:
                # fewer than `weight_spread` points exist to the left,
                # so pmin must be cut
                pmin = ival[0]
            else:
                # optimal case
                pmin = weight_spread
            if x_full.shape[0] - ival[1] < weight_spread:
                # non-optimal case:
                # fewer than `weight_spread` points exist to the right,
                # so pmax must be cut
                pmax = x_full.shape[0] - ival[1]
            else:
                # optimal case
                pmax = weight_spread

            x = x_full[ival[0] - pmin:ival[1] + pmax]
            y = y_full[ival[0] - pmin:ival[1] + pmax]
            # we are fitting knots on a base 10 logarithmic scale.
            xs = np.log10(x)
            knots = np.linspace(xs[1], xs[-1], knotnumber + 2)[1:-1]
            try:
                tck = spintp.splrep(xs, y, s=0, k=3, t=knots, task=-1)
                ys = spintp.splev(xs, tck, der=0)
            except Exception:
                if verbose > 0:
                    raise ValueError("Could not find spline fit with "+\
                                     "{} knots.".format(knotnumber))
                return
            if verbose > 0:
                ## If plotting module is available:
                #name = "spline fit: "+str(knotnumber)+" knots"
                #plotting.savePlotSingle(name, 1*x, 1*y, 1*ys,
                #                         dirname=".",
                #                         uselatex=uselatex)
                # use matplotlib.pylab
                try:
                    from matplotlib import pylab as plt
                    plt.xscale("log")
                    plt.plot(x, ys, x, y)
                    plt.show()
                except ImportError:
                    # Tell the user to install matplotlib
                    print("Couldn't import pylab! - not Plotting")

            ## Calculation of variance
            # In some cases, the actual cropping interval from ival[0]
            # to ival[1] is chosen, such that the dataweights must be
            # calculated from unknown datapoints.
            # (e.g. points+endcrop > len(correlation)
            # We deal with this by multiplying dataweights with a factor
            # corresponding to the missed points.
            for i in range(x_fit.shape[0]):
                # Define start and end positions of the sections from
                # where we wish to calculate the dataweights.
                # Offset at beginning:
                if i + ival[0] < weight_spread:
                    # The offset that occurs
                    offsetstart = weight_spread - i - ival[0]
                    offsetcrop = 0
                elif ival[0] > weight_spread:
                    offsetstart = 0
                    offsetcrop = ival[0] - weight_spread
                else:
                    offsetstart = 0
                    offsetcrop = 0
                # i: counter on correlation array
                # start: counter on y array
                start = i - weight_spread + offsetstart + ival[0] - offsetcrop
                end = start + 2 * weight_spread + 1 - offsetstart
                dataweights[i] = (y[start:end] - ys[start:end]).std()
                # The standard deviation at the end and the start of the
                # array are multiplied by a factor corresponding to the
                # number of bins that were not used for calculation of the
                # standard deviation.
                if offsetstart != 0:
                    reference = 2 * weight_spread + 1
                    dividor = reference - offsetstart
                    dataweights[i] *= reference / dividor
                # Do not substitute len(y[start:end]) with end-start!
                # It is not the same!
                backset = 2 * weight_spread + 1 - len(
                    y[start:end]) - offsetstart
                if backset != 0:
                    reference = 2 * weight_spread + 1
                    dividor = reference - backset
                    dataweights[i] *= reference / dividor
        elif weight_type == "model function":
            # Number of neighboring (left and right) points to include
            if ival[0] < weight_spread:
                pmin = ival[0]
            else:
                pmin = weight_spread
            if x_full.shape[0] - ival[1] < weight_spread:
                pmax = x_full.shape[0] - ival[1]
            else:
                pmax = weight_spread
            x = x_full[ival[0] - pmin:ival[1] + pmax]
            y = y_full[ival[0] - pmin:ival[1] + pmax]
            # Calculated dataweights
            for i in np.arange(x_fit.shape[0]):
                # Define start and end positions of the sections from
                # where we wish to calculate the dataweights.
                # Offset at beginning:
                if i + ival[0] < weight_spread:
                    # The offset that occurs
                    offsetstart = weight_spread - i - ival[0]
                    offsetcrop = 0
                elif ival[0] > weight_spread:
                    offsetstart = 0
                    offsetcrop = ival[0] - weight_spread
                else:
                    offsetstart = 0
                    offsetcrop = 0
                # i: counter on correlation array
                # start: counter on correlation array
                start = i - weight_spread + offsetstart + ival[0] - offsetcrop
                end = start + 2 * weight_spread + 1 - offsetstart
                #start = ival[0] - weight_spread + i
                #end = ival[0] + weight_spread + i + 1
                diff = y - model(model_parms, x)
                dataweights[i] = diff[start:end].std()
                # The standard deviation at the end and the start of the
                # array are multiplied by a factor corresponding to the
                # number of bins that were not used for calculation of the
                # standard deviation.
                if offsetstart != 0:
                    reference = 2 * weight_spread + 1
                    dividor = reference - offsetstart
                    dataweights[i] *= reference / dividor
                # Do not substitute len(diff[start:end]) with end-start!
                # It is not the same!
                backset = 2 * weight_spread + 1 - len(
                    diff[start:end]) - offsetstart
                if backset != 0:
                    reference = 2 * weight_spread + 1
                    dividor = reference - backset
                    dataweights[i] *= reference / dividor
        elif weight_type == "none":
            pass
        else:
            # This means that the user knows the dataweights and already
            # gave it to us.
            weights = weight_data
            assert weights is not None, "User defined weights not given: " + weight_type

            # Check if these other weights have length of the cropped
            # or the full array.
            if weights.shape[0] == x_fit.shape[0]:
                dataweights = weights
            elif weights.shape[0] == x_full.shape[0]:
                dataweights = weights[ival[0]:ival[1]]
            else:
                raise ValueError(
                    "`weights` must have length of full or cropped array.")

        return dataweights
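
The spline branch of compute_weights reduces to: fit a least-squares cubic spline with a few knots on a log10 x-axis, then take the standard deviation of the data-minus-spline residuals in a sliding window as the per-point weight. A stripped-down, self-contained sketch of that idea (hypothetical data; knot number and window half-width match the defaults above):

import numpy as np
from scipy import interpolate as spintp

rng = np.random.default_rng(0)
x = np.logspace(-3, 1, 200)                    # lag-time-like axis
y = 1.0 / (1.0 + x) + rng.normal(0.0, 0.02, x.size)

knotnumber, weight_spread = 5, 3
xs = np.log10(x)
knots = np.linspace(xs[1], xs[-1], knotnumber + 2)[1:-1]
tck = spintp.splrep(xs, y, s=0, k=3, t=knots, task=-1)
ys = spintp.splev(xs, tck)

dataweights = np.empty_like(y)
for i in range(y.size):
    lo = max(0, i - weight_spread)             # sliding window around point i
    hi = min(y.size, i + weight_spread + 1)
    dataweights[i] = (y[lo:hi] - ys[lo:hi]).std()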
Code example #43
# pr(a,b5,a+b5)

import numpy as np
import scipy.interpolate as spi

# x=np.random.randint(1,10,size=(2,2))
x = np.linspace(-2 * np.pi, 2 * np.pi, 11)


def f(x):
    return np.sin(x) + 0.5 * x


c = f(x)
# pr(c,x.shape)
tck = spi.splrep(x, c, k=1)
# pr(x,c,tck,len(tck))
iy = spi.splev(x, tck)


def paint(x, y, iy):
    import matplotlib.pyplot as plt

    plt.figure(figsize=(8, 4), dpi=80)
    plt.plot(x, y, color='blue', linewidth=3.5, label='true value')
    plt.scatter(x, iy, 30, color='red', label='interpolated value')

    plt.legend(loc=0)

    plt.grid(True)

    plt.xlabel('x')
Code example #44
File: __init__.py Project: jlvdb/montepython_public
    def loglkl(self, cosmo, data):

        #start = time.time()

        # One wants to obtain here the relation between z and r, this is done
        # by asking the cosmological module with the function z_of_r
        self.r = np.zeros(self.nzmax, 'float64')
        self.dzdr = np.zeros(self.nzmax, 'float64')

        self.r, self.dzdr = cosmo.z_of_r(self.z)

        # Compute now the selection function eta(r) = eta(z) dz/dr normalized
        # to one. The np.newaxis helps to broadcast the one-dimensional array
        # dzdr to the proper shape. Note that eta_norm is also broadcasted as
        # an array of the same shape as eta_z
        self.eta_r = self.eta_z * (self.dzdr[:, np.newaxis] / self.eta_norm)

        # Compute function g_i(r), that depends on r and the bin
        # g_i(r) = 2r(1+z(r)) int_0^+\infty drs eta_r(rs) (rs-r)/rs
        # TODO is the integration from 0 or r ?
        g = np.zeros((self.nzmax, self.nbin), 'float64')
        for Bin in xrange(self.nbin):
            for nr in xrange(1, self.nzmax - 1):
                fun = self.eta_r[nr:, Bin] * (self.r[nr:] -
                                              self.r[nr]) / self.r[nr:]
                g[nr, Bin] = np.sum(0.5 * (fun[1:] + fun[:-1]) *
                                    (self.r[nr + 1:] - self.r[nr:-1]))
                g[nr, Bin] *= 2. * self.r[nr] * (1. + self.z[nr])

        # compute the maximum l where most contributions are linear
        # as a function of the lower bin number
        if self.use_lmax_lincut:
            lintegrand_lincut_o = np.zeros((self.nzmax, self.nbin, self.nbin),
                                           'float64')
            lintegrand_lincut_u = np.zeros((self.nzmax, self.nbin, self.nbin),
                                           'float64')
            l_lincut = np.zeros((self.nbin, self.nbin), 'float64')
            l_lincut_mean = np.zeros(self.nbin, 'float64')
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    lintegrand_lincut_o[
                        1:, Bin1,
                        Bin2] = g[1:, Bin1] * g[1:, Bin2] / (self.r[1:])
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    lintegrand_lincut_u[
                        1:, Bin1,
                        Bin2] = g[1:, Bin1] * g[1:, Bin2] / (self.r[1:]**2)
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    l_lincut[Bin1, Bin2] = np.sum(
                        0.5 * (lintegrand_lincut_o[1:, Bin1, Bin2] +
                               lintegrand_lincut_o[:-1, Bin1, Bin2]) *
                        (self.r[1:] - self.r[:-1]))
                    l_lincut[Bin1, Bin2] /= np.sum(
                        0.5 * (lintegrand_lincut_u[1:, Bin1, Bin2] +
                               lintegrand_lincut_u[:-1, Bin1, Bin2]) *
                        (self.r[1:] - self.r[:-1]))
            z_peak = np.zeros((self.nbin, self.nbin), 'float64')
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    z_peak[Bin1, Bin2] = self.zmax
                    for index_z in xrange(self.nzmax):
                        if (self.r[index_z] > l_lincut[Bin1, Bin2]):
                            z_peak[Bin1, Bin2] = self.z[index_z]
                            break
                    if self.use_zscaling:
                        l_lincut[Bin1,
                                 Bin2] *= self.kmax_hMpc * cosmo.h() * pow(
                                     1. + z_peak[Bin1, Bin2], 2. /
                                     (2. + cosmo.n_s()))
                    else:
                        l_lincut[Bin1, Bin2] *= self.kmax_hMpc * cosmo.h()
                l_lincut_mean[Bin1] = np.sum(
                    l_lincut[Bin1, :]) / (self.nbin - Bin1)

        #for Bin1 in xrange(self.nbin):
        #for Bin2 in xrange(Bin1,self.nbin):
        #print("%s\t%s\t%s\t%s" % (Bin1, Bin2, l_lincut[Bin1, Bin2], l_lincut_mean[Bin1]))

        #for nr in xrange(1, self.nzmax-1):
        #	print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (self.z[nr], g[nr, 0], g[nr, 1], g[nr, 2], g[nr, 3], g[nr, 4], g[nr, 5], g[nr, 6], g[nr, 7], g[nr, 8], g[nr, 9]))

        # Get power spectrum P(k=l/r,z(r)) from cosmological module
        kmin_in_inv_Mpc = self.k_min_h_by_Mpc * cosmo.h()
        kmax_in_inv_Mpc = self.k_max_h_by_Mpc * cosmo.h()
        pk = np.zeros((self.nlmax, self.nzmax), 'float64')
        for index_l in xrange(self.nlmax):
            for index_z in xrange(1, self.nzmax):

                # These lines would return an error when you ask for P(k,z) out of computed range
                #        if (self.l[index_l]/self.r[index_z] > self.k_max):
                #            raise io_mp.LikelihoodError(
                #                "you should increase euclid_lensing.k_max up to at least %g" % (self.l[index_l]/self.r[index_z]))
                #        pk[index_l, index_z] = cosmo.pk(
                #            self.l[index_l]/self.r[index_z], self.z[index_z])

                # These lines set P(k,z) to zero out of [k_min, k_max] range
                k_in_inv_Mpc = self.l[index_l] / self.r[index_z]
                if (k_in_inv_Mpc < kmin_in_inv_Mpc) or (k_in_inv_Mpc >
                                                        kmax_in_inv_Mpc):
                    pk[index_l, index_z] = 0.
                else:
                    pk[index_l,
                       index_z] = cosmo.pk(self.l[index_l] / self.r[index_z],
                                           self.z[index_z])

#print("%s\t%s\t%s" %(self.l[index_l], self.z[index_z], pk[index_l, index_z]))

        # Recover the non_linear scale computed by halofit. If no scale was
        # affected, set the scale to one, and make sure that the nuisance
        # parameter epsilon is set to zero
        k_sigma = np.zeros(self.nzmax, 'float64')
        if (cosmo.nonlinear_method == 0):
            k_sigma[:] = 1.e6
        else:
            k_sigma = cosmo.nonlinear_scale(self.z, self.nzmax)

        if not (cosmo.nonlinear_method == 0):
            k_sigma_problem = False
            for index_z in xrange(self.nzmax - 1):
                if (k_sigma[index_z + 1] <
                        k_sigma[index_z]) or (k_sigma[index_z + 1] > 2.5):
                    k_sigma[index_z + 1] = 2.5
                    k_sigma_problem = True
                #print("%s\t%s" % (k_sigma[index_z], self.z[index_z]))
            if k_sigma_problem:
                warnings.warn(
                    "There were unphysical (decreasing in redshift or exploding) values of k_sigma (=cosmo.nonlinear_scale(...)). To proceed they were set to 2.5, the highest scale that seems to be stable."
                )

        # Define the alpha function, that will characterize the theoretical
        # uncertainty. Chosen to be 0.001 at low k, rising between 0.1 and 0.2
        # to self.theoretical_error
        alpha = np.zeros((self.nlmax, self.nzmax), 'float64')
        # self.theoretical_error = 0.1
        if self.theoretical_error != 0:
            #MArchi     for index_l in range(self.nlmax):
            #k = self.l[index_l]/self.r[1:]
            #alpha[index_l, 1:] = np.log(1.+k[:]/k_sigma[1:])/(
            #1.+np.log(1.+k[:]/k_sigma[1:]))*self.theoretical_error
            for index_l in xrange(self.nlmax):
                for index_z in xrange(1, self.nzmax):
                    k = self.l[index_l] / self.r[index_z]
                    alpha[index_l,
                          index_z] = np.log(1. + k / k_sigma[index_z]) / (
                              1. + np.log(1. + k / k_sigma[index_z])
                          ) * self.theoretical_error

        # recover the e_th_nu part of the error function
        e_th_nu = self.coefficient_f_nu * cosmo.Omega_nu / cosmo.Omega_m()

        # Compute the Error E_th_nu function
        if 'epsilon' in self.use_nuisance:
            E_th_nu = np.zeros((self.nlmax, self.nzmax), 'float64')
            for index_l in range(1, self.nlmax):
                E_th_nu[index_l, :] = np.log(
                    1. + self.l[index_l] / k_sigma[:] * self.r[:]) / (
                        1. + np.log(1. + self.l[index_l] / k_sigma[:] *
                                    self.r[:])) * e_th_nu

            # Add the error function, with the nuisance parameter, to P_nl_th,
            # if the nuisance parameter exists
            for index_l in range(self.nlmax):
                epsilon = data.mcmc_parameters['epsilon']['current'] * (
                    data.mcmc_parameters['epsilon']['scale'])
                pk[index_l, :] *= (1. + epsilon * E_th_nu[index_l, :])

        # Start loop over l for computation of C_l^shear
        Cl_integrand = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64')
        Cl = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')
        # Start loop over l for computation of E_l
        if self.theoretical_error != 0:
            El_integrand = np.zeros((self.nzmax, self.nbin, self.nbin),
                                    'float64')
            El = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64')

        for nl in xrange(self.nlmax):

            # find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r))
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    Cl_integrand[1:, Bin1,
                                 Bin2] = g[1:, Bin1] * g[1:, Bin2] / (
                                     self.r[1:]**2) * pk[nl, 1:]
                    if self.theoretical_error != 0:
                        El_integrand[1:, Bin1, Bin2] = g[1:, Bin1] * (
                            g[1:, Bin2]) / (self.r[1:]**
                                            2) * pk[nl, 1:] * alpha[nl, 1:]

            # Integrate over r to get C_l^shear_ij = P_ij(l)
            # C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r)
            # g_j(r) /r**2) P(k=l/r,z(r))
            # It is then multiplied by 9/16*Omega_m**2 to be in units of Mpc**4
            # and then by (h/2997.9)**4 to be dimensionless
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    Cl[nl, Bin1,
                       Bin2] = np.sum(0.5 * (Cl_integrand[1:, Bin1, Bin2] +
                                             Cl_integrand[:-1, Bin1, Bin2]) *
                                      (self.r[1:] - self.r[:-1]))
                    Cl[nl, Bin1, Bin2] *= 9. / 16. * (cosmo.Omega_m())**2
                    Cl[nl, Bin1, Bin2] *= (cosmo.h() / 2997.9)**4

                    if self.theoretical_error != 0:
                        El[nl, Bin1, Bin2] = np.sum(
                            0.5 * (El_integrand[1:, Bin1, Bin2] +
                                   El_integrand[:-1, Bin1, Bin2]) *
                            (self.r[1:] - self.r[:-1]))
                        El[nl, Bin1, Bin2] *= 9. / 16. * (cosmo.Omega_m())**2
                        El[nl, Bin1, Bin2] *= (cosmo.h() / 2997.9)**4
                    if Bin1 == Bin2:
                        Cl[nl, Bin1, Bin2] += self.noise

        # Write fiducial model spectra if needed (exit in that case)
        if self.fid_values_exist is False:
            # Store the values now, and exit.
            fid_file_path = os.path.join(self.data_directory,
                                         self.fiducial_file)
            with open(fid_file_path, 'w') as fid_file:
                fid_file.write('# Fiducial parameters')
                for key, value in io_mp.dictitems(data.mcmc_parameters):
                    fid_file.write(', %s = %.5g' %
                                   (key, value['current'] * value['scale']))
                fid_file.write('\n')
                for nl in range(self.nlmax):
                    for Bin1 in range(self.nbin):
                        for Bin2 in range(self.nbin):
                            fid_file.write("%.8g\n" % Cl[nl, Bin1, Bin2])
            print('\n')
            warnings.warn(
                "Writing fiducial model in %s, for %s likelihood\n" %
                (self.data_directory + '/' + self.fiducial_file, self.name))
            return 1j

        # Now that the fiducial model is stored, we add the El to both Cl and
        # Cl_fid (we create a new array, otherwise we would modify the
        # self.Cl_fid from one step to the other)

        # Spline Cl[nl,Bin1,Bin2] along l
        spline_Cl = np.empty((self.nbin, self.nbin), dtype=(list, 3))
        for Bin1 in xrange(self.nbin):
            for Bin2 in xrange(Bin1, self.nbin):
                spline_Cl[Bin1,
                          Bin2] = list(itp.splrep(self.l, Cl[:, Bin1, Bin2]))
                if Bin2 > Bin1:
                    spline_Cl[Bin2, Bin1] = spline_Cl[Bin1, Bin2]

        # Spline El[nl,Bin1,Bin2] along l
        if self.theoretical_error != 0:
            spline_El = np.empty((self.nbin, self.nbin), dtype=(list, 3))
            for Bin1 in xrange(self.nbin):
                for Bin2 in xrange(Bin1, self.nbin):
                    spline_El[Bin1, Bin2] = list(
                        itp.splrep(self.l, El[:, Bin1, Bin2]))
                    if Bin2 > Bin1:
                        spline_El[Bin2, Bin1] = spline_El[Bin1, Bin2]

        # Spline Cl_fid[nl,Bin1,Bin2]    along l
        spline_Cl_fid = np.empty((self.nbin, self.nbin), dtype=(list, 3))
        for Bin1 in xrange(self.nbin):
            for Bin2 in xrange(Bin1, self.nbin):
                spline_Cl_fid[Bin1, Bin2] = list(
                    itp.splrep(self.l, self.Cl_fid[:, Bin1, Bin2]))
                if Bin2 > Bin1:
                    spline_Cl_fid[Bin2, Bin1] = spline_Cl_fid[Bin1, Bin2]

        # Compute likelihood

        # Prepare interpolation for every integer value of l, from the array
        # self.l, to finally compute the likelihood (sum over all l's)
        dof = 1. / (int(self.l[-1]) - int(self.l[0]) + 1)

        ells = list(range(int(self.l[0]), int(self.l[-1]) + 1))

        # Define cov theory, observ and error on the whole integer range of ell
        # values
        Cov_theory = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
        Cov_observ = np.zeros((len(ells), self.nbin, self.nbin), 'float64')
        Cov_error = np.zeros((len(ells), self.nbin, self.nbin), 'float64')

        for Bin1 in xrange(self.nbin):
            for Bin2 in xrange(Bin1, self.nbin):
                Cov_theory[:, Bin1, Bin2] = itp.splev(ells, spline_Cl[Bin1,
                                                                      Bin2])
                Cov_observ[:, Bin1,
                           Bin2] = itp.splev(ells, spline_Cl_fid[Bin1, Bin2])
                if self.theoretical_error > 0:
                    Cov_error[:, Bin1,
                              Bin2] = itp.splev(ells, spline_El[Bin1, Bin2])
                if Bin2 > Bin1:
                    Cov_theory[:, Bin2, Bin1] = Cov_theory[:, Bin1, Bin2]
                    Cov_observ[:, Bin2, Bin1] = Cov_observ[:, Bin1, Bin2]
                    Cov_error[:, Bin2, Bin1] = Cov_error[:, Bin1, Bin2]

        chi2 = 0.

        # TODO parallelize this
        for index, ell in enumerate(ells):

            if self.use_lmax_lincut:
                CutBin = -1
                for zBin in xrange(self.nbin):
                    if (ell < l_lincut_mean[zBin]):
                        CutBin = zBin
                        det_theory = np.linalg.det(Cov_theory[index, CutBin:,
                                                              CutBin:])
                        det_observ = np.linalg.det(Cov_observ[index, CutBin:,
                                                              CutBin:])
                        break
                if (CutBin == -1):
                    break
            else:
                det_theory = np.linalg.det(Cov_theory[index, :, :])
                det_observ = np.linalg.det(Cov_observ[index, :, :])

            if (self.theoretical_error > 0):
                det_cross_err = 0
                for i in range(self.nbin):
                    newCov = np.copy(Cov_theory[
                        index, :, :])  #MArchi#newCov = np.copy(Cov_theory)
                    newCov[:, i] = Cov_error[
                        index, :, i]  #MArchi#newCov[:, i] = Cov_error[:, i]
                    det_cross_err += np.linalg.det(newCov)

                # Newton method
                # Find starting point for the method:
                start = 0
                step = 0.001 * det_theory / det_cross_err
                error = 1
                old_chi2 = -1. * data.boundary_loglike
                error_tol = 0.01
                epsilon_l = start
                while error > error_tol:
                    vector = np.array(
                        [epsilon_l - step, epsilon_l, epsilon_l + step])
                    #print(vector.shape)
                    # Computing the function on three neighbouring points
                    function_vector = np.zeros(3, 'float64')
                    for k in range(3):
                        Cov_theory_plus_error = Cov_theory + vector[
                            k] * Cov_error
                        det_theory_plus_error = np.linalg.det(
                            Cov_theory_plus_error[index, :, :]
                        )  #MArchi#det_theory_plus_error = np.linalg.det(Cov_theory_plus_error)
                        det_theory_plus_error_cross_obs = 0
                        for i in range(self.nbin):
                            newCov = np.copy(
                                Cov_theory_plus_error[index, :, :]
                            )  #MArchi#newCov = np.copy(Cov_theory_plus_error)
                            newCov[:, i] = Cov_observ[
                                index, :,
                                i]  #MArchi#newCov[:, i] = Cov_observ[:, i]
                            det_theory_plus_error_cross_obs += np.linalg.det(
                                newCov)
                        try:
                            function_vector[k] = (
                                2. * ell + 1.) * self.fsky * (
                                    det_theory_plus_error_cross_obs /
                                    det_theory_plus_error + math.log(
                                        det_theory_plus_error / det_observ) -
                                    self.nbin) + dof * vector[k]**2
                        except ValueError:
                            warnings.warn(
                                "ska1_lensing: Could not evaluate chi2 including theoretical error with the current parameters. The corresponding chi2 is now set to nan!"
                            )
                            chi2 = np.nan
                            break

                    # Computing first and second derivatives by finite differences
                    first_d = (function_vector[2] -
                               function_vector[0]) / (vector[2] - vector[0])
                    second_d = (function_vector[2] + function_vector[0] -
                                2 * function_vector[1]) / (vector[2] -
                                                           vector[1])**2

                    # Updating point and error
                    epsilon_l = vector[1] - first_d / second_d
                    error = abs(function_vector[1] - old_chi2)
                    old_chi2 = function_vector[1]
                # End Newton

                Cov_theory_plus_error = Cov_theory + epsilon_l * Cov_error
                det_theory_plus_error = np.linalg.det(
                    Cov_theory_plus_error[index, :, :]
                )  #MArchi#det_theory_plus_error = np.linalg.det(Cov_theory_plus_error)

                det_theory_plus_error_cross_obs = 0
                for i in range(self.nbin):
                    newCov = np.copy(
                        Cov_theory_plus_error[index, :, :]
                    )  #MArchi#newCov = np.copy(Cov_theory_plus_error)
                    newCov[:, i] = Cov_observ[
                        index, :, i]  #MArchi#newCov[:, i] = Cov_observ[:, i]
                    det_theory_plus_error_cross_obs += np.linalg.det(newCov)

                chi2 += (2. * ell + 1.) * self.fsky * (
                    det_theory_plus_error_cross_obs / det_theory_plus_error +
                    math.log(det_theory_plus_error / det_observ) -
                    self.nbin) + dof * epsilon_l**2

            else:
                if self.use_lmax_lincut:
                    det_cross = 0.
                    for i in xrange(0, self.nbin - CutBin):
                        newCov = np.copy(Cov_theory[index, CutBin:, CutBin:])
                        newCov[:, i] = Cov_observ[index, CutBin:, CutBin + i]
                        det_cross += np.linalg.det(newCov)
                else:
                    det_cross = 0.
                    for i in xrange(self.nbin):
                        newCov = np.copy(Cov_theory[index, :, :])
                        newCov[:, i] = Cov_observ[index, :, i]
                        det_cross += np.linalg.det(newCov)

                if self.use_lmax_lincut:
                    chi2 += (2. * ell + 1.) * self.fsky * (
                        det_cross / det_theory +
                        math.log(det_theory / det_observ) - self.nbin + CutBin)
                else:
                    chi2 += (2. * ell + 1.) * self.fsky * (
                        det_cross / det_theory +
                        math.log(det_theory / det_observ) - self.nbin)

        # Finally adding a gaussian prior on the epsilon nuisance parameter, if
        # present
        if 'epsilon' in self.use_nuisance:
            epsilon = data.mcmc_parameters['epsilon']['current'] * \
                data.mcmc_parameters['epsilon']['scale']
            chi2 += epsilon**2

        #end = time.time()
        #print("time needed in s:",(end-start))

        return -chi2 / 2.
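
Stripped of the lincut and theoretical-error branches, the per-multipole likelihood term above is the standard determinant form for Gaussian-distributed spectra, chi2 += (2*ell+1)*fsky*(d_mix/d_th + ln(d_th/d_obs) - nbin), where d_mix sums the determinants of the theory matrix with one column at a time swapped for the observed one. A hedged numpy sketch for a single ell:

import numpy as np

def chi2_ell(ell, fsky, cov_theory, cov_observ):
    """Per-multipole chi2 term for nbin x nbin covariance matrices."""
    nbin = cov_theory.shape[0]
    det_theory = np.linalg.det(cov_theory)
    det_observ = np.linalg.det(cov_observ)
    det_cross = 0.0
    for i in range(nbin):
        mixed = cov_theory.copy()
        mixed[:, i] = cov_observ[:, i]   # swap one column with the observed one
        det_cross += np.linalg.det(mixed)
    return (2. * ell + 1.) * fsky * (det_cross / det_theory
                                     + np.log(det_theory / det_observ) - nbin)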
Code example #45
def _eval_bspline_basis(x, knots, degree, deriv='all', include_intercept=True):
    try:
        from scipy.interpolate import splev
    except ImportError:
        raise ImportError("spline functionality requires scipy")
    # 'knots' are assumed to be already pre-processed. E.g. usually you
    # want to include duplicate copies of boundary knots; you should do
    # that *before* calling this constructor.
    knots = np.atleast_1d(np.asarray(knots, dtype=float))
    assert knots.ndim == 1
    knots.sort()
    degree = int(degree)
    x = np.atleast_1d(x)
    if x.ndim == 2 and x.shape[1] == 1:
        x = x[:, 0]
    assert x.ndim == 1
    # XX FIXME: when points fall outside of the boundaries, splev and R seem
    # to handle them differently. I do not know why yet. So until we understand
    # this and decide what to do with it, I'm going to play it safe and
    # disallow such points.
    if np.min(x) < np.min(knots) or np.max(x) > np.max(knots):
        raise NotImplementedError("some data points fall outside the "
                                  "outermost knots, and I'm not sure how "
                                  "to handle them. (Patches accepted!)")
    # Thanks to Charles Harris for explaining splev. It's not well
    # documented, but basically it computes an arbitrary b-spline basis
    # given knots and degree on some specified points (or derivatives
    # thereof, but we do not use that functionality), and then returns some
    # linear combination of these basis functions. To get out the basis
    # functions themselves, we use linear combinations like [1, 0, 0], [0,
    # 1, 0], [0, 0, 1].
    # NB: This probably makes it rather inefficient (though I have not checked
    # to be sure -- maybe the fortran code actually skips computing the basis
    # function for coefficients that are zero).
    # Note: the order of a spline is the same as its degree + 1.
    # Note: there are (len(knots) - order) basis functions.

    k_const = 1 - int(include_intercept)
    n_bases = len(knots) - (degree + 1) - k_const
    if deriv in ['all', 0]:
        basis = np.empty((x.shape[0], n_bases), dtype=float)
        ret = basis
    if deriv in ['all', 1]:
        der1_basis = np.empty((x.shape[0], n_bases), dtype=float)
        ret = der1_basis
    if deriv in ['all', 2]:
        der2_basis = np.empty((x.shape[0], n_bases), dtype=float)
        ret = der2_basis

    for i in range(n_bases):
        coefs = np.zeros((n_bases + k_const, ))
        # we are skipping the first column of the basis to drop constant
        coefs[i + k_const] = 1
        ii = i
        if deriv in ['all', 0]:
            basis[:, ii] = splev(x, (knots, coefs, degree))
        if deriv in ['all', 1]:
            der1_basis[:, ii] = splev(x, (knots, coefs, degree), der=1)
        if deriv in ['all', 2]:
            der2_basis[:, ii] = splev(x, (knots, coefs, degree), der=2)

    if deriv == 'all':
        return basis, der1_basis, der2_basis
    else:
        return ret
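
The unit-coefficient trick described in the comments is easy to see in isolation: calling splev with coefficient vectors [1,0,...], [0,1,...] returns the individual B-spline basis functions, which sum to one inside the knot span. A small standalone demonstration (the clamped knot vector here is hypothetical):

import numpy as np
from scipy.interpolate import splev

degree = 3
knots = np.array([0., 0., 0., 0., 1., 2., 3., 3., 3., 3.])  # clamped cubic knots
n_bases = len(knots) - (degree + 1)                          # 6 basis functions

x = np.linspace(0.0, 3.0, 7)
basis = np.empty((x.size, n_bases))
for i in range(n_bases):
    coefs = np.zeros(n_bases)
    coefs[i] = 1.0                       # pick out the i-th basis function
    basis[:, i] = splev(x, (knots, coefs, degree))

print(basis.sum(axis=1))   # partition of unity: every row sums to ~1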
Code example #46
            density_inte_gas[i][j] * l[i]**2 * 4 * np.pi, l[i])
        M_inte_cga[i][j] = np.trapz(
            density_inte_cga[i][j] * l[i]**2 * 4 * np.pi, l[i])

M_inte_nfw = np.transpose(M_inte_nfw)
M_inte_gas = np.transpose(M_inte_gas)
M_inte_cga = np.transpose(M_inte_cga)
Z = []
a = 0.68
n = 1
zeta = 1.376
f_inte_cga = 2 * NN * ((M_200 / M_1)**(-zeta) +
                       (M_200 / M_1)**(yita_cga))**(-1.0)
f_inte_star = 2 * NN * ((M_200 / M_1)**(-zeta) +
                        (M_200 / M_1)**(yita_star))**(-1.0)
f_inte_gas = omega_b / omega_m - f_inte_star
f_inte_clm = omega_dm / omega_m + f_inte_star - f_inte_cga

for j in range(le):
    M_inte_cga_f = splrep(r, M_inte_cga[j])
    M_inte_gas_f = splrep(r, M_inte_gas[j])
    M_inte_nfw_f = splrep(r, M_inte_nfw[j])
    func_1 = lambda k: a * (
        (splev(r, M_inte_nfw_f) /
         (f_inte_clm[j] * splev(r, M_inte_nfw_f) + f_inte_cga[j] * splev(
             r * k, M_inte_cga_f) + f_inte_gas[j] * splev(r * k, M_inte_gas_f)
          ))**n - 1) + 1 - k
    Z.append(scipy.optimize.broyden1(func_1, r * 0 + 2, f_tol=1e-10))
np.savetxt('adiabadic_yita_2.txt', np.transpose(Z), fmt='%.8g')
print("adiabadic run successfully")
Code example #47
    def function(self, lamb, Av=1, Rv=2.74, Alambda=True, **kwargs):
        """
        Gordon03_SMCBar extinction law

        Parameters
        ----------
        lamb: float or ndarray(dtype=float)
            wavelength [in Angstroms] at which to evaluate the law.

        Av: float
            desired A(V) (default 1.0)

        Rv: float
            desired R(V) (default 2.74)

        Alambda: bool
            if set returns +2.5*1./log(10.)*tau, tau otherwise

        Returns
        -------
        r: float or ndarray(dtype=float)
            attenuation as a function of wavelength
            depending on Alambda option +2.5*1./log(10.)*tau,  or tau
        """
        # ensure the units are in angstrom
        _lamb = units.Quantity(lamb, units.angstrom).value

        if isinstance(_lamb, float) or isinstance(_lamb, np.float_):
            _lamb = np.asarray([lamb])
        else:
            _lamb = lamb[:]

        # convert to wavenumbers
        x = 1.0e4 / _lamb

        # check that the wavenumbers are within the defined range
        _test_valid_x_range(x, self.x_range, self.name)

        # set Rv explicitly to the fixed value
        Rv = self.Rv

        c1 = -4.959 / Rv
        c2 = 2.264 / Rv
        c3 = 0.389 / Rv
        c4 = 0.461 / Rv
        x0 = 4.6
        gamma = 1.0

        k = np.zeros(np.size(x))

        # UV part
        xcutuv = 10000.0 / 2700.0
        xspluv = 10000.0 / np.array([2700.0, 2600.0])

        ind = np.where(x >= xcutuv)
        if np.size(ind) > 0:
            k[ind] = (1.0 + c1 + (c2 * x[ind]) + c3 * ((x[ind])**2) /
                      (((x[ind])**2 - (x0**2))**2 + (gamma**2) *
                       ((x[ind])**2)))
            yspluv = (1.0 + c1 + (c2 * xspluv) + c3 * ((xspluv)**2) /
                      (((xspluv)**2 - (x0**2))**2 + (gamma**2) *
                       ((xspluv)**2)))

        # FUV portion
        ind = np.where(x >= 5.9)
        if np.size(ind) > 0:
            k[ind] += c4 * (0.5392 * ((x[ind] - 5.9)**2) + 0.05644 *
                            ((x[ind] - 5.9)**3))

        # Opt/NIR part
        ind = np.where(x < xcutuv)
        if np.size(ind) > 0:
            xsplopir = np.zeros(9)
            xsplopir[0] = 0.0
            xsplopir[1:10] = 1.0 / np.array(
                [2.198, 1.65, 1.25, 0.81, 0.65, 0.55, 0.44, 0.37])

            # Values directly from Gordon et al. (2003)
            # ysplopir =  np.array([0.0,0.016,0.169,0.131,0.567,0.801,
            #                      1.00,1.374,1.672])
            # K & J values adjusted to provide a smooth,
            #      non-negative cubic spline interpolation
            ysplopir = np.array(
                [0.0, 0.11, 0.169, 0.25, 0.567, 0.801, 1.00, 1.374, 1.672])

            tck = interpolate.splrep(np.hstack([xsplopir, xspluv]),
                                     np.hstack([ysplopir, yspluv]),
                                     k=3)
            k[ind] = interpolate.splev(x[ind], tck)

        if Alambda:
            return k * Av
        else:
            return k * Av * (np.log(10.0) * 0.4)
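
A hedged usage sketch, assuming this method lives on an instantiable Gordon03_SMCBar law class as the docstring suggests (the class name and instantiation are assumptions, not confirmed by the snippet):

import numpy as np

law = Gordon03_SMCBar()                     # hypothetical instantiation
lamb = np.array([1500.0, 3000.0, 5500.0])   # Angstroms: FUV, NUV, V
A_lambda = law.function(lamb, Av=1.0)       # A(lambda) for A(V) = 1, R_V fixed at 2.74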
Code example #48
                 print("wrong")

        #cutpoint holds the segment boundaries produced above (10 segments)

        # i is from 0 - 10
        for i in range(0, len(cutpoint) - 1):
            #print(i)
            gesture_emg1 = np.array(emg1[cutpoint[i]:cutpoint[i + 1]])
            # plt.plot(gesture)
            #print('length of gesture:%d' % len(gesture_emg1))
            x = np.linspace(0, len(gesture_emg1), len(gesture_emg1))
            #print('length of x = %d'%len(x))
            x_new = np.linspace(0, len(gesture_emg1), 5000)
            # print('length of x_new = %d'%len(x_new))
            tck_emg1 = interpolate.splrep(x, gesture_emg1)
            gesture_emg1_bspline = interpolate.splev(x_new, tck_emg1)
            gesture_emg1_bspline_abs = list(map(abs, gesture_emg1_bspline))
            # print(gesture_bspline)
            # plt.plot(x, gesture, "o", label=u"original data")
            #print('length of gesture after interpolate:%d' % len(gesture_emg1_bspline))
            # plt.plot(gesture_bspline, label=u"B-spline interpolate")
            # pl.legend()
            # pl.show()
            #the gesture_bspline list is the result of one gesture of one emg

            gesture_emg2 = np.array(emg2[cutpoint[i]:cutpoint[i + 1]])
            tck_emg2 = interpolate.splrep(x, gesture_emg2)
            gesture_emg2_bspline = interpolate.splev(x_new, tck_emg2)
            gesture_emg2_bspline_abs = list(map(abs, gesture_emg2_bspline))

Code example #49
for band in bands:
    # loads filter transmission curve file
    filtname = filtdir+'/%s_OmegaCAM.res'%band

    f = open(filtname, 'r')
    filt_wave, filt_t = np.loadtxt(f, unpack=True)
    f.close()

    filt_spline = splrep(filt_wave, filt_t)

    wmin_filt, wmax_filt = filt_wave[0], filt_wave[-1]
    cond_filt = (wave_obs>=wmin_filt)&(wave_obs<=wmax_filt)
    nu_cond = np.flipud(cond_filt)

    # Evaluate the filter response at the wavelengths of the spectrum
    response = splev(wave_obs[cond_filt], filt_spline)
    nu_filter = csol*1e8/wave_obs[cond_filt]

    # flips arrays
    response = np.flipud(response)
    nu_filter = np.flipud(nu_filter)

    # filter normalization
    bp = splrep(nu_filter, response/nu_filter, s=0, k=1)
    bandpass = splint(nu_filter[0], nu_filter[-1], bp)

    # Integrate
    observed = splrep(nu_filter, response*fnu[nu_cond]/nu_filter, s=0, k=1)
    flux = splint(nu_filter[0], nu_filter[-1], observed)

    mag = -2.5*np.log10(flux/bandpass) -48.6 -2.5*log_mstar
Code example #50
def smooth_function(x, y):
    tck = interpolate.splrep(x, y, s=50)   # s=50: smoothing factor
    y_smooth = interpolate.splev(x, tck, der=0)

    return y_smooth
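
A quick usage sketch for smooth_function with synthetic noisy data, assuming from scipy import interpolate at module level; s=50 is the smoothing factor carried over from the original and should be tuned to the noise level:

import numpy as np

x = np.arange(100, dtype=float)
y = np.sin(x / 10.0) + np.random.normal(0.0, 0.3, x.size)
y_smooth = smooth_function(x, y)   # smoothed curve evaluated on the same grid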
Code example #51
File: signal.py Project: BlehMaks/radis-1
def resample(
    xspace,
    vector,
    xspace_new,
    k=1,
    ext="error",
    energy_threshold=1e-3,
    print_conservation=True,
):
    """Resample (xspace, vector) on a new space (xspace_new) of evenly
    distributed data and whose bounds are taken as the same as `xspace`.

    Uses spline interpolation to create the intermediary points. Number of points
    is the same as the initial xspace, times a resolution factor. Verifies energy
    conservation on the intersecting range at the end.


    Parameters
    ----------

    xspace: array
        space on which vector was generated

    vector: array
        quantity to resample

    xspace_new: array
        new xspace vector to resample on

    k: int
        order of spline interpolation. 3: cubic, 1: linear. Default 1.

    ext: 'error', 'extrapolate', 0, 1, nan
        Controls the value returned for elements of xspace_new not in the interval
        defined by xspace. If 'error', raise a ValueError. If 'extrapolate', well,
        extrapolate. If '0' or 0, fill with 0; if 1, fill with 1; if nan, fill
        with nan. Default 'error'.

    energy_threshold: float
        if energy conservation (integrals on the intersecting range) is above
        this threshold, raise an error. If None, don't check for energy conservation.
        Default 1e-3 (0.1%)

    print_conservation: boolean
        if True, prints energy conservation


    Returns
    -------

    vector_new: array
        resampled vector on evenly spaced array. Number of element is conserved.

    Note that depending upon the from_space > to_space operation, sorting may
    be reversed.


    Examples
    --------

    Resample a :class:`~radis.spectrum.spectrum.Spectrum` radiance
    on an evenly spaced wavenumber space::

        w_nm, I_nm = s.get('radiance')
        w_cm, I_cm = resample_even(nm2cm(w_nm), I_nm)
    """

    if len(xspace) != len(vector):
        raise ValueError("vector and xspace should have the same length. " +
                         "Got {0}, {1}".format(len(vector), len(xspace)))

    # Check reversed (interpolation requires objects are sorted)
    if is_sorted(xspace):
        reverse = False
    elif is_sorted_backward(xspace):
        reverse = True
    else:
        raise ValueError(
            "Resampling requires wavespace to be sorted. It is not!")

    if reverse:
        xspace = xspace[::-1]
        xspace_new = xspace_new[::-1]
        vector = vector[::-1]

    # translate ext in FITPACK syntax for splev
    if ext == "extrapolate":
        ext_fitpack = 0  # splev returns extrapolated value
    elif ext in [0, "0", 1, "1", nan, "nan"]:
        ext_fitpack = 1  # splev returns 0  (fixed in post-processing)
    elif ext == "error":
        ext_fitpack = 2  # splev raises ValueError
    else:
        raise ValueError("Unexpected value for `ext`: {0}".format(ext))

    if isnan(vector).sum() > 0:
        raise ValueError(
            "Input vector has {0} nans. Interpolation will fail".format(
                isnan(vector).sum()))

    # Resample the slit function on the spectrum grid
    try:
        tck = splrep(xspace, vector, k=k)
    except ValueError:
        # Probably error on input data. Print it before crashing.
        print("\nValueError - Input data below:")
        print("-" * 5)
        print(xspace)
        print(vector)
        print("Check plot 101 too")
        import matplotlib.pyplot as plt

        plt.figure(101).clear()
        plt.plot(xspace, vector)
        plt.xlabel("xspace")
        plt.ylabel("vector")
        plt.title("ValueError")
        raise
    vector_new = splev(xspace_new, tck, ext=ext_fitpack)

    # ... get masks
    b = (xspace >= xspace_new.min()) * (xspace <= xspace_new.max())
    b_new = (xspace_new >= xspace.min()) * (xspace_new <= xspace.max())

    # fix filling for out of boundary values
    if ext in [1, "1"]:
        vector_new[~b_new] = 1
        if __debug__:
            printdbg("Filling with 1 on w<{0}, w>{1} ({2} points)".format(
                xspace.min(), xspace.max(), (1 - b_new).sum()))
    elif ext in [nan, "nan"]:
        vector_new[~b_new] = nan
        if __debug__:
            printdbg("Filling with nans on w<{0}, w>{1} ({2} points)".format(
                xspace.min(), xspace.max(), (1 - b_new).sum()))

    # Check energy conservation:

    # ... calculate energy
    energy0 = abs(trapz(vector[b], x=xspace[b]))
    energy_new = abs(trapz(vector_new[b_new], x=xspace_new[b_new]))
    if energy_new == 0:  # deal with particular case of energy = 0
        if energy0 == 0:
            energy_ratio = 1
        else:
            energy_ratio = 0
    else:  # general case
        energy_ratio = energy0 / energy_new
    if energy_threshold:
        if abs(energy_ratio - 1) > energy_threshold:
            import matplotlib.pyplot as plt

            plt.figure(101).clear()
            plt.plot(xspace, vector, "-ok", label="original")
            plt.plot(xspace_new, vector_new, "-or", label="resampled")
            plt.xlabel("xspace")
            plt.ylabel("vector")
            plt.legend()
            raise ValueError(
                "Error in resampling: " +
                "energy conservation ({0:.5g}%) below tolerance level ({1:.5g}%)"
                .format((1 - energy_ratio) * 100, energy_threshold * 100) +
                ". Check graph 101. " +
                "Increasing energy_threshold is possible but not recommended")
    if print_conservation:
        print("Resampling - Energy conservation: {0:.5g}%".format(
            energy_ratio * 100))

    # Reverse again
    if reverse:
        # xspace_new = xspace_new[::-1]
        vector_new = vector_new[::-1]

    return vector_new
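
A concrete call pattern for resample, with synthetic arrays (it relies on this module's own imports: numpy's nan/isnan/trapz helpers and scipy's splrep/splev, plus the radis sorting utilities):

import numpy as np

w = np.linspace(400.0, 800.0, 101)          # original grid
I = np.exp(-((w - 600.0) / 50.0) ** 2)      # smooth test signal
w_new = np.linspace(400.0, 800.0, 257)      # denser grid, same bounds

I_new = resample(w, I, w_new, k=3, print_conservation=True)
# expected output close to: Resampling - Energy conservation: 100%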
Code example #52
    def function(self, lamb, Av=1, Rv=3.1, Alambda=True, **kwargs):
        """
        Fitzpatrick99 extinction Law

        Parameters
        ----------
        lamb: float or ndarray(dtype=float)
            wavelength [in Angstroms] at which to evaluate the law.

        Av: float
            desired A(V) (default 1.0)

        Rv: float
            desired R(V) (default 3.1)

        Alambda: bool
            if set returns +2.5*1./log(10.)*tau, tau otherwise

        Returns
        -------
        r: float or ndarray(dtype=float)
            attenuation as a function of wavelength
            depending on Alambda option +2.5*1./log(10.)*tau,  or tau
        """
        # ensure the units are in angstrom
        _lamb = units.Quantity(lamb, units.angstrom).value

        if isinstance(_lamb, float) or isinstance(_lamb, np.float_):
            _lamb = np.asarray([lamb])
        else:
            _lamb = lamb[:]

        # convert to wavenumbers
        x = 1.0e4 / _lamb

        # check that the wavenumbers are within the defined range
        _test_valid_x_range(x, self.x_range, self.name)

        # initialize values
        c2 = -0.824 + 4.717 / Rv
        c1 = 2.030 - 3.007 * c2
        c3 = 3.23
        c4 = 0.41
        x0 = 4.596
        gamma = 0.99

        k = np.zeros(np.size(x))

        # compute the UV portion of A(lambda)/E(B-V)
        xcutuv = 10000.0 / 2700.0
        xspluv = 10000.0 / np.array([2700.0, 2600.0])
        ind = np.where(x >= xcutuv)

        if np.size(ind) > 0:
            k[ind] = (c1 + (c2 * x[ind]) + c3 * ((x[ind])**2) /
                      (((x[ind])**2 - (x0**2))**2 + (gamma**2) *
                       ((x[ind])**2)))
            yspluv = (c1 + (c2 * xspluv) + c3 * ((xspluv)**2) /
                      (((xspluv)**2 - (x0**2))**2 + (gamma**2) *
                       ((xspluv)**2)))

            # FUV portion
            fuvind = np.where(x >= 5.9)
            k[fuvind] += c4 * (0.5392 * ((x[fuvind] - 5.9)**2) + 0.05644 *
                               ((x[fuvind] - 5.9)**3))

            k[ind] += Rv
            yspluv += Rv

        # Optical/NIR portion

        ind = np.where(x < xcutuv)
        if np.size(ind) > 0:
            xsplopir = np.zeros(7)
            xsplopir[0] = 0.0
            xsplopir[1:7] = 10000.0 / np.array(
                [26500.0, 12200.0, 6000.0, 5470.0, 4670.0, 4110.0])

            ysplopir = np.zeros(7)
            ysplopir[0:3] = np.array([0.0, 0.26469, 0.82925]) * Rv / 3.1

            ysplopir[3:7] = np.array([
                np.poly1d([2.13572e-04, 1.00270, -4.22809e-01])(Rv),
                np.poly1d([-7.35778e-05, 1.00216, -5.13540e-02])(Rv),
                np.poly1d([-3.32598e-05, 1.00184, 7.00127e-01])(Rv),
                np.poly1d([
                    1.19456, 1.01707, -5.46959e-03, 7.97809e-04, -4.45636e-05
                ][::-1])(Rv),
            ])

            tck = interpolate.splrep(np.hstack([xsplopir, xspluv]),
                                     np.hstack([ysplopir, yspluv]),
                                     k=3)
            k[ind] = interpolate.splev(x[ind], tck)

        # convert from A(lambda)/E(B-V) to A(lambda)/A(V)
        k /= Rv

        # setup the output
        if Alambda:
            return k * Av
        else:
            return k * Av * (np.log(10.0) * 0.4)
Code example #53
File: tesss.py Project: ristofer/ugly_teropobot
def b_angles(t):
    real_t = t % 2
    print(real_t)
    return splev(real_t + 4.081279275, tck)
Code example #54
def convert(file_name, physics, timestep):
    """Converts the parsed .amc values into qpos and qvel values and resamples.

  Args:
    file_name: The .amc file to be parsed and converted.
    physics: The corresponding physics instance.
    timestep: Desired output interval between resampled frames.

  Returns:
    A namedtuple with fields:
        `qpos`, a numpy array containing converted positional variables.
        `qvel`, a numpy array containing converted velocity variables.
        `time`, a numpy array containing the corresponding times.
  """
    frame_values = parse(file_name)
    joint2index = {}
    for name in physics.named.data.qpos.axes.row.names:
        joint2index[name] = physics.named.data.qpos.axes.row.convert_key_item(
            name)
    index2joint = {}
    for joint, index in joint2index.items():
        if isinstance(index, slice):
            indices = range(index.start, index.stop)
        else:
            indices = [index]
        for ii in indices:
            index2joint[ii] = joint

    # Convert frame_values to qpos
    amcvals2qpos_transformer = Amcvals2qpos(index2joint,
                                            _CMU_MOCAP_JOINT_ORDER)
    qpos_values = []
    for frame_value in frame_values:
        qpos_values.append(amcvals2qpos_transformer(frame_value))
    qpos_values = np.stack(qpos_values)  # Time by nq

    # Interpolate/resample.
    # Note: interpolate quaternions rather than euler angles (slerp).
    # see https://en.wikipedia.org/wiki/Slerp
    qpos_values_resampled = []
    time_vals = np.arange(0, len(frame_values) * MOCAP_DT - 1e-8, MOCAP_DT)
    time_vals_new = np.arange(0, len(frame_values) * MOCAP_DT, timestep)
    while time_vals_new[-1] > time_vals[-1]:
        time_vals_new = time_vals_new[:-1]

    for i in range(qpos_values.shape[1]):
        f = interpolate.splrep(time_vals, qpos_values[:, i])
        qpos_values_resampled.append(interpolate.splev(time_vals_new, f))

    qpos_values_resampled = np.stack(qpos_values_resampled)  # nq by ntime

    qvel_list = []
    for t in range(qpos_values_resampled.shape[1] - 1):
        p_tp1 = qpos_values_resampled[:, t + 1]
        p_t = qpos_values_resampled[:, t]
        qvel = [(p_tp1[:3] - p_t[:3]) / timestep,
                mjmath.mj_quat2vel(mjmath.mj_quatdiff(p_t[3:7], p_tp1[3:7]),
                                   timestep), (p_tp1[7:] - p_t[7:]) / timestep]
        qvel_list.append(np.concatenate(qvel))

    qvel_values_resampled = np.vstack(qvel_list).T

    return Converted(qpos_values_resampled, qvel_values_resampled,
                     time_vals_new)
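A hypothetical driver for convert (a sketch; the model path and clip name below are placeholders, not taken from the source):

# from dm_control import mujoco
# physics = mujoco.Physics.from_xml_path('humanoid_CMU.xml')  # placeholder model
# clip = convert('08_01.amc', physics, timestep=physics.model.opt.timestep)
# print(clip.qpos.shape, clip.qvel.shape, clip.time.shape)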
コード例 #55
ファイル: red_corr.py プロジェクト: EJH-ljmu/PyNeb_devel
    def _F99_like(self, wave):
        """
        In the UV, it returns the Fitzpatrick & Massa 1990 law.
        In the opt/IR, it returns the Fitzpatrick & Massa 1990 law. 
        
        Fitzpatrick 1999, PASP, 11, 63
        http://adsabs.harvard.edu/abs/1999PASP..111...63F
        Fitzpatrick & Massa 1990, ApJS, 72, 163
        http://adsabs.harvard.edu/abs/1990ApJS...72..163F
        
        Comments:
        The FM90 depends on 6 parameters which must be set by the user and are stored in RedCorr.FitzParams.
        For the predefined set of parameters defined in FM99, use instead the F99 law.
        R_V must be provided, as the law depends on it. The dependence with R_V follows Table 4 in the F99 paper 

        Range: UV through IR
        
        """
        def fit_UV(x):

            Xx = c1 + c2 * x
            Xx += c3 * x**2 / ((x**2 - x0**2)**2 + (x * gamma)**2)
            tt2 = (x > 5.9)
            if np.any(tt2):
                Xx[tt2] += c4 * (0.5392 * (x[tt2] - 5.9)**2 + 0.05644 *
                                 (x[tt2] - 5.9)**3)
            Xx += self.R_V
            return Xx

        x = 1e4 / np.asarray([wave])  # inv microns
        Xx = np.zeros_like(x)
        if self.FitzParams is None:
            pn.log_.warn('Fitzpatrick law requires FitzParams',
                         calling=self.calling)
            return None
        x0 = self.FitzParams[0]
        gamma = self.FitzParams[1]
        c1 = self.FitzParams[2]
        c2 = self.FitzParams[3]
        c3 = self.FitzParams[4]
        c4 = self.FitzParams[5]

        # UV from the 1988 paper:
        xcutuv = 10000.0 / 2700.0
        tt = (x >= xcutuv)
        Xx[tt] = fit_UV(x[tt])

        l2x = lambda l: 1e4 / l
        x_opir = np.array([
            0,
            l2x(26500.0),
            l2x(12200.0),
            l2x(6000.0),
            l2x(5470.0),
            l2x(4670.0),
            l2x(4110.0),
            l2x(2700.),
            l2x(2600.)
        ])
        norm = self.R_V / 3.1

        # Opt and IR from the 1999 paper
        y_opir = np.array([
            0., 0.265 * norm, 0.829 * norm, -0.426 + 1.0044 * self.R_V,
            -0.050 + 1.0016 * self.R_V, 0.701 + 1.0016 * self.R_V,
            1.208 + 1.0032 * self.R_V - 0.00033 * self.R_V**2,
            fit_UV(l2x(2700.)),
            fit_UV(l2x(2600.))
        ])
        tt = x < xcutuv
        if tt.sum() > 0:
            tck = interpolate.splrep(x_opir, y_opir)
            Xx[tt] = interpolate.splev(x[tt], tck, der=0)
        return np.squeeze(Xx)

コード例 #56
ファイル: 04.py プロジェクト: ored95/AoD
# y[split+1] = y[split+1] * 1.01

import numpy as np
from math import sin, pi, exp, fabs


def f(t):
    return sin(4 * pi * t) * exp(-fabs(t))


# x is the sample grid defined earlier in the (truncated) script
y = [f(xt) for xt in x]

# Tests
newX = np.arange(0.05, 0.93, 0.01)

# Spline
from scipy import interpolate
tck = interpolate.splrep(x, y, s=0)
Y_spline = interpolate.splev(newX, tck)

# Bline
from blib import Bline2
b = Bline2(x, y)
b.execute()
# print(b.A)
Y_Bline = b.interp(newX)

# WNN
from blib import WNN
wnn = WNN(epoch_max=50000, Nh_wnn=len(x), plot_flag=True)
wnn.load_first_function(x, y)
wnn.train()
Y_wnn = wnn.d
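To eyeball the fits, a quick comparison plot (a sketch; assumes matplotlib is available, and Y_wnn is left out since wnn.d appears to live on the training grid rather than newX):

import matplotlib.pyplot as plt

plt.plot(newX, [f(t) for t in newX], 'k--', label='f(t)')
plt.plot(newX, Y_spline, label='splrep/splev')
plt.plot(newX, Y_Bline, label='Bline2')
plt.legend()
plt.show()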
コード例 #57
def pz(om, h0, scenario=2):
    f0 = open('NSNSrates.dat')
    f1 = open('BHNSrates.dat')
    f2 = open('BHBHrates.dat')
    nsns, nsbh, bhbh = np.loadtxt(f0), np.loadtxt(f1), np.loadtxt(f2)
    f0.close()
    f1.close()
    f2.close()
    nsns = nsns[nsns[:, 0].argsort(), ]
    nsbh = nsbh[nsbh[:, 0].argsort(), ]
    bhbh = bhbh[bhbh[:, 0].argsort(), ]
    #print nsns

    c = 299790.  # speed of light [km/s]
    w = -1
    dH = c / h0
    rho0 = 8

    # detector horizon r0 in [Mpc], for ET with the polynomial noise curve
    r0 = 1527.
    #r0 = 1591.   # ET-D sensitivity
    #r0 = 1918.   # ET xylophone
    Mchirp0 = 1.2
    Mchirp1 = 3.2
    Mchirp2 = 6.7

    def Ez(z, om, w):
        return 1 / np.sqrt(om * (1 + z)**3 + (1 - om) * (1 + z)**(3 * (1 + w)))

    def r(z, om, w):
        # comoving distance in units of the Hubble distance dH
        return integrate.quad(Ez, 0, z, args=(om, w))[0]

    #print r(17,om,w)

    vec_r = np.vectorize(r)
    z = nsns[:, 0]
    N = len(z)
    Dist = vec_r(z, om, w)

    x = (rho0 / 8.) * (1 + z)**(1 / 6.) * dH * (Dist / r0) * (1.2)**(5 / 6.)
    x0 = x / (Mchirp0**(5 / 6.))
    x1 = x / (Mchirp1**(5 / 6.))
    x2 = x / (Mchirp2**(5 / 6.))
    Ctheta0, Ctheta1, Ctheta2 = np.zeros(N), np.zeros(N), np.zeros(N)
    for i in range(0, N):
        if x0[i] >= 0 and x0[i] <= 4:
            Ctheta0[i] = (1 + x0[i]) * (4 - x0[i])**4 / 256.
        else:
            Ctheta0[i] = 0
        if x1[i] >= 0 and x1[i] <= 4:
            Ctheta1[i] = (1 + x1[i]) * (4 - x1[i])**4 / 256.
        else:
            Ctheta1[i] = 0
        if x2[i] >= 0 and x2[i] <= 4:
            Ctheta2[i] = (1 + x2[i]) * (4 - x2[i])**4 / 256.
        else:
            Ctheta2[i] = 0
    #print Ctheta

    E = 1 / np.sqrt(om * (1 + z)**3 + (1 - om) * (1 + z)**(3 * (1 + w)))

    n0 = nsns[:, scenario] * 10**(-9)  # column 2 is the "standard low" scenario
    s0dz = 4 * np.pi * (dH)**3. * (n0 / (1 + z)) * Dist**2 * E * Ctheta0
    #plt.plot(z,sDNdz,'k')
    n1 = nsbh[:, scenario] * 10**(-9)
    s1dz = 4 * np.pi * (dH)**3. * (n1 / (1 + z)) * Dist**2 * E * Ctheta1
    n2 = bhbh[:, scenario] * 10**(-9)
    s2dz = 4 * np.pi * (dH)**3. * (n2 / (1 + z)) * Dist**2 * E * Ctheta2

    dp = np.zeros([N, 4])
    dp[:, 0] = z
    dp[:, 1] = s0dz  # NS-NS (strictly this should be multiplied by p(i) - p(i-1))
    dp[:, 2] = s1dz  # NS-BH
    dp[:, 3] = s2dz  # BH-BH
    ps = np.zeros([N, 2])
    ps[:, 0] = dp[:, 0]
    ps[:, 1] = dp[:, 1] + dp[:, 2] + dp[:, 3]

    ps0 = ps[ps[:, 0] < 2.98]
    ps1 = ps[ps[:, 0] > 2.98]
    newx = np.linspace(ps1[:, 0].min(), ps1[:, 0].max(), 144)
    tck = interpolate.splrep(ps1[:, 0], ps1[:, 1])
    newy = interpolate.splev(newx, tck)
    ps2 = np.vstack((newx, newy)).T
    newps = np.vstack((ps0, ps2))
    #plt.plot(ps[:,0],ps[:,1],'k')
    #plt.plot(newps[:,0],newps[:,1])
    #plt.show()
    N = len(newps[:, 0])
    #   R=np.zeros([N+1,3])
    #   R[:,0]=np.append(0,newps[:,0])
    #   R[:,1]=np.append(0,newps[:,1])
    #   R[0,2]=0
    R = np.zeros([N, 3])
    R[:, 0] = newps[:, 0]
    R[:, 1] = newps[:, 1]
    R[0, 2] = 0
    for i in range(N - 1):
        R[i + 1, 2] = R[i, 2] + R[i, 1] * (R[i + 1, 0] - R[i, 0])
    #print R
    R[:, 1] = R[:, 1] / R[-1, 2]
    R[:, 2] = R[:, 2] / R[-1, 2]
    #   print "R[-1,2]", R[-1,2]
    #   plt.plot(R[:,0], R[:,2])
    #   plt.show()
    return R
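The per-source weighting in the loops above is the piecewise factor C(x) = (1 + x)(4 - x)**4 / 256 on [0, 4], zero elsewhere; a vectorized equivalent (a sketch, not part of the original script):

import numpy as np

def ctheta(x):
    # (1 + x) * (4 - x)**4 / 256 on [0, 4], zero outside, as in the pz() loops
    x = np.asarray(x, dtype=float)
    return np.where((x >= 0) & (x <= 4), (1 + x) * (4 - x)**4 / 256., 0.0)

# equivalent to the three loops above:
# Ctheta0, Ctheta1, Ctheta2 = ctheta(x0), ctheta(x1), ctheta(x2)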
コード例 #58
    Kx_months = Kx_months_full/(1+z[n])
    knots = knots_full/(1+z[n])
    
    j_spl = splrep(Jx_months, test_j_flux[n,:], w=1/test_j_fluxerr[n,:], t=knots)
    j_splines.append(j_spl)
    k_spl = splrep(Kx_months, test_k_flux[n,:], w=1/test_k_fluxerr[n,:], t=knots)
    k_splines.append(k_spl)
#    if save_ccf == True:
    fig = plt.figure(figsize=[9,6])
    ax1 = plt.subplot(111)
    plt.errorbar(Jx_months, test_j_flux[n,:], yerr=test_j_fluxerr[n,:], fmt='o', 
                 label='J-band')
    plt.errorbar(Kx_months, test_k_flux[n,:], yerr=test_k_fluxerr[n,:], fmt='o', 
                 label='K-band')
    #plt.plot(Kx_months, test_k_flux[testind,:], 'o', label='K-band')
    test_J_fit = splev(test_x, j_spl)
    test_K_fit = splev(test_x, k_spl)
    test_J_knots = splev(knots, j_spl)
    test_K_knots = splev(knots, k_spl)
    plt.plot(test_x, test_J_fit, label='J-band Spline')
    plt.plot(test_x, test_K_fit, label='K-band Spline')
    plt.plot(knots, test_J_knots, 'x', color='C2')
    plt.plot(knots, test_K_knots, 'x', color='C3')
    plt.xlabel('Restframe Months')
    plt.ylabel('Normalised Flux')
    plt.title(str(varydata['ID'][n])+' z = '+str(z[n]))
#    plt.xticks(inds, month_ticks, rotation = 'vertical')
    plt.legend()
    plt.tight_layout()
#    plt.savefig(filepath+'fits/'+str(varydata['ID'][n])+'.png', overwrite=True)
#    plt.close('all')
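A note on the spline fits above: passing t=knots to splrep switches it from smoothing mode to a weighted least-squares fit with those interior knots, and w=1/err is the 1/sigma weighting that splrep expects.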
コード例 #59
def plot_chart(fig,
               t,
               num_top_cities,
               pop_limit,
               names,
               pop_fns,
               lats,
               longs,
               colors,
               compress_pops=False,
               save_img=False,
               in_animation=False,
               show_map=True,
               show_map_labels=True,
               show_chart=True):
    assert show_map or show_chart, "Neither map nor chart requested"

    assert t >= 1901, "t (year) must be >= 1901"
    assert t < 2012, "t (year) must be < 2012"

    assert num_top_cities >= 10, "num_top_cities must be >= 10"
    assert num_top_cities <= 50, "num_top_cities must be <= 50"

    assert pop_limit >= 100000, "pop_limit must be >= 1,00,000"
    assert pop_limit <= 20000000, "pop_limit must be <= 2,00,00,000"

    if in_animation:
        assert not save_img, "save_img cannot be True if in_animation is True"
        fig.clear()

    map_pop_limit = pop_limit
    label_pop_limit = pop_limit
    max_labels = num_top_cities
    num_chart_cities = num_top_cities

    chart_text_size = 22.5 - 0.25 * num_chart_cities
    map_text_size = 22.5 - 0.25 * max_labels

    # Population at time t
    pops = [interpolate.splev(t, pop_fn, der=0) for pop_fn in pop_fns]

    # Show map
    if show_map:
        # Cities to show on map
        show_cities = [
            (p, n, c, la, lo)
            for (p, n, c, la,
                 lo) in sorted(zip(pops, names, colors, lats, longs))
            if p >= map_pop_limit
        ]
        show_pops = [p for p, _, _, _, _ in show_cities]
        show_colors = [c for _, _, c, _, _ in show_cities]
        show_lats = [la for _, _, _, la, _ in show_cities]
        show_longs = [lo for _, _, _, _, lo in show_cities]

        # Basemap
        if show_chart:
            map_ax = fig.add_subplot(121)
        else:
            map_ax = fig.add_subplot(111)

        map_title = "Cities with Population > " + pretty_number(
            map_pop_limit) + " " + "(" + str(int(t)) + ")"
        if show_map_labels:
            map_title += " : Top " + str(max_labels) + " labeled"
        map_ax.set_title(map_title, size=map_text_size)

        m = basemap.Basemap(projection='aeqd',
                            resolution='l',
                            lat_0=20,
                            lon_0=80,
                            llcrnrlat=5,
                            llcrnrlon=68,
                            urcrnrlat=35,
                            urcrnrlon=100)
        m.drawlsmask(land_color='white', ocean_color='lightskyblue')
        m.drawcountries(linewidth=0.5)

        # Show cities on map
        m.scatter(show_longs,
                  show_lats,
                  latlon=True,
                  s=[p / 50000 for p in show_pops],
                  alpha=0.5,
                  c=show_colors)

        # Show labels on map
        if show_map_labels:
            # Labels to show on map
            label_cities = [(p, n, c, la, lo)
                            for (p, n, c, la, lo) in show_cities
                            if p >= label_pop_limit
                            ][-min(len(show_cities), max_labels):]

            for p, n, _, la, lo in label_cities:
                label_name = pretty_name(n)
                if label_name in left_align:
                    ha = 'right'
                    lo_adjust = -math.sqrt(p) / 8000
                else:
                    ha = 'left'
                    lo_adjust = math.sqrt(p) / 8000
                if label_name in low_align:
                    va = 'top'
                elif label_name in high_align:
                    va = 'bottom'
                else:
                    va = 'center'
                map_ax.annotate(label_name,
                                xy=m(lo + lo_adjust, la),
                                va=va,
                                ha=ha,
                                size=map_text_size)

    # Show chart
    if show_chart:
        # Top cities to show in chart
        top_cities = sorted(zip(pops, names, colors))[-num_chart_cities:]
        top_names = [n for _, n, _ in top_cities]
        top_pops = [p for p, _, _ in top_cities]
        top_colors = [c for _, _, c in top_cities]
        top_labels = [pretty_text(p, n) for p, n, _ in top_cities]

        if show_map:
            chart_ax = fig.add_subplot(122)
        else:
            chart_ax = fig.add_subplot(111)
        chart_ax.set_title(str(num_chart_cities) + " Largest Cities",
                           size=chart_text_size)
        chart_ax.spines['right'].set_color('none')
        chart_ax.spines['left'].set_color('none')
        chart_ax.spines['top'].set_color('none')
        chart_ax.spines['bottom'].set_color('none')
        chart_ax.xaxis.set_major_locator(ticker.NullLocator())
        chart_ax.yaxis.set_major_locator(ticker.NullLocator())
        chart_ax.xaxis.set_major_formatter(ticker.NullFormatter())
        chart_ax.yaxis.set_major_formatter(ticker.NullFormatter())
        if compress_pops:
            chart_ax.set_xlim(0, 20000000)
        chart_ax.barh(top_labels, top_pops, alpha=0.5, color=top_colors)
        for l in top_labels:
            chart_ax.annotate(l,
                              xy=(0.1, l),
                              va='center',
                              size=chart_text_size)

    if in_animation:
        plt.draw()
    else:
        if save_img:
            plt.savefig('output/' + 'map_' + str(num_top_cities) + '_' +
                        str(t) + '.png')
        else:
            plt.show()
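A hypothetical driver for plot_chart (a sketch; names, pop_fns, lats, longs and colors are assumed to come from the project's data-loading code, with pop_fns being splrep tuples as consumed by splev above):

# fig = plt.figure(figsize=(16, 9))
# plot_chart(fig, t=1951, num_top_cities=20, pop_limit=500000,
#            names=names, pop_fns=pop_fns, lats=lats, longs=longs,
#            colors=colors, show_map=True, show_chart=True)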
コード例 #60
ファイル: InterpolateField.py プロジェクト: dhidas/SRWTools
#!/usr/bin/env python

import sys
from SRWToolsUtil import *
import matplotlib.pyplot as plt

import numpy
from scipy import interpolate

# grab data from file
[Z, Bx, By, Bz] = ReadHallProbeData(sys.argv[1])

fo = open(sys.argv[2], 'w')

ZNew = numpy.arange(Z[0], Z[-1], (Z[-1] - Z[0]) / 100000.)

tckX = interpolate.splrep(Z, Bx, s=0)
BxNew = interpolate.splev(ZNew, tckX, der=0)

tckY = interpolate.splrep(Z, By, s=0)
ByNew = interpolate.splev(ZNew, tckY, der=0)

tckZ = interpolate.splrep(Z, Bz, s=0)
BzNew = interpolate.splev(ZNew, tckZ, der=0)

for i in range(len(ZNew)):
    l = map(str, [ZNew[i], BxNew[i], ByNew[i], BzNew[i]])
    fo.write(' '.join(l) + '\n')

fo.close()
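If the field gradient along Z is also of interest, the same spline representations support it directly via der=1 (a sketch; the dB*dZ names are new):

dBxdZ = interpolate.splev(ZNew, tckX, der=1)
dBydZ = interpolate.splev(ZNew, tckY, der=1)
dBzdZ = interpolate.splev(ZNew, tckZ, der=1)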